From 54916bd156ea387b6f30d798a22663456410c3de Mon Sep 17 00:00:00 2001 From: Kosuke Morimoto Date: Wed, 15 Jan 2025 16:39:33 +0900 Subject: [PATCH] fix --- .all-contributorsrc | 30 + .cspell.json | 2699 +++++++++++- .gitattributes | 50 +- .gitfiles | 175 +- .github/ISSUE_TEMPLATE/bug_report.md | 16 +- .../ISSUE_TEMPLATE/security_issue_report.md | 16 +- .github/PULL_REQUEST_TEMPLATE.md | 20 +- .github/actions/deploy-chaos-mesh/action.yaml | 2 +- .../detect-docker-image-tags/action.yaml | 3 +- .../determine-docker-image-tag/action.yaml | 2 +- .github/actions/docker-build/action.yaml | 2 +- .github/actions/dump-context/action.yaml | 2 +- .../e2e-deploy-vald-helm-operator/action.yaml | 2 +- .../e2e-deploy-vald-readreplica/action.yaml | 2 +- .github/actions/e2e-deploy-vald/action.yaml | 2 +- .github/actions/notify-slack/action.yaml | 2 +- .github/actions/scan-docker-image/action.yaml | 2 +- .github/actions/setup-e2e/action.yaml | 2 +- .github/actions/setup-go/action.yaml | 19 +- .github/actions/setup-helm/action.yaml | 21 +- .github/actions/setup-k3d/action.yaml | 19 +- .github/actions/setup-yq/action.yaml | 2 +- .../actions/wait-for-docker-image/action.yaml | 2 +- .github/dependabot.yml | 182 - .github/helm/values/values-correction.yaml | 2 +- .github/labeler.yml | 181 - .github/workflows/_detect-ci-container.yml | 48 - .github/workflows/_release-pr.yml | 134 - .../workflows/dockers-agent-faiss-image.yaml | 268 ++ .../workflows/dockers-agent-faiss-image.yml | 69 - .github/workflows/dockers-agent-image.yaml | 74 + .github/workflows/dockers-agent-image.yml | 65 - .../workflows/dockers-agent-ngt-image.yaml | 272 ++ .github/workflows/dockers-agent-ngt-image.yml | 69 - .../dockers-agent-sidecar-image.yaml | 298 ++ .../workflows/dockers-agent-sidecar-image.yml | 67 - .../dockers-benchmark-job-image.yaml | 264 ++ .../workflows/dockers-benchmark-job-image.yml | 65 - .../dockers-benchmark-operator-image.yaml | 257 +- .github/workflows/dockers-binfmt-image.yaml | 43 +- .github/workflows/dockers-buildbase-image.yml | 48 - .github/workflows/dockers-buildkit-image.yaml | 43 +- .../dockers-buildkit-syft-scanner-image.yaml | 43 +- .../workflows/dockers-ci-container-image.yml | 54 - .../workflows/dockers-dev-container-image.yml | 49 - .../dockers-discoverer-k8s-image.yaml | 262 ++ .../dockers-discoverer-k8s-image.yml | 63 - .../dockers-example-client-image.yaml | 68 + .../dockers-gateway-filter-image.yaml | 262 ++ .../dockers-gateway-filter-image.yml | 69 - .../workflows/dockers-gateway-lb-image.yaml | 260 ++ .../workflows/dockers-gateway-lb-image.yml | 69 - .../dockers-gateway-mirror-image.yaml | 263 +- .../dockers-helm-operator-image.yaml | 68 + .../workflows/dockers-helm-operator-image.yml | 59 - .github/workflows/dockers-image-scan.yml | 92 - .../dockers-index-correction-image.yaml | 252 ++ .../workflows/dockers-index-correction.yml | 63 - .../dockers-index-creation-image.yaml | 246 ++ .github/workflows/dockers-index-creation.yml | 63 - .../dockers-index-deletion-image.yaml | 246 ++ .../dockers-index-operator-image.yaml | 244 ++ .github/workflows/dockers-index-operator.yml | 63 - .../workflows/dockers-index-save-image.yaml | 246 ++ .github/workflows/dockers-index-save.yml | 63 - .github/workflows/dockers-loadtest-image.yaml | 217 + .github/workflows/dockers-loadtest-image.yml | 66 - .../dockers-manager-index-image.yaml | 266 ++ .../workflows/dockers-manager-index-image.yml | 65 - .../dockers-readreplica-rotate-image.yaml | 242 ++ .../workflows/dockers-readreplica-rotate.yml | 63 - 
.../dockers-release-branch-image.yaml | 6 + .github/workflows/e2e-chaos.yaml | 2 +- .github/workflows/e2e-code-bench-agent.yaml | 8 +- .github/workflows/e2e-max-dim.yml | 122 - .github/workflows/e2e-profiling.yml | 196 - .github/workflows/e2e.yml | 426 -- .github/workflows/semver-major-minor.yaml | 2 +- .github/workflows/semver-patch.yaml | 2 +- .github/workflows/unit-test-rust.yaml | 51 + .github/workflows/unit-test.yaml | 6 +- .github/workflows/update-actions.yaml | 64 - .github/workflows/update-web-docs.yml | 43 - .prh.yaml | 5 - CHANGELOG.md | 328 +- Makefile | 110 +- Makefile.d/build.mk | 20 + Makefile.d/dependencies.mk | 19 +- Makefile.d/docker.mk | 72 +- Makefile.d/e2e.mk | 43 +- Makefile.d/functions.mk | 57 +- Makefile.d/git.mk | 2 +- Makefile.d/helm.mk | 100 +- Makefile.d/k8s.mk | 94 +- Makefile.d/kind.mk | 3 +- Makefile.d/proto.mk | 37 +- Makefile.d/test.mk | 10 + Makefile.d/tools.mk | 70 +- README.md | 6 +- apis/docs/buf.gen.doc.yaml | 27 + apis/docs/buf.gen.payload.yaml | 29 + apis/docs/buf.gen.tmpl.yaml | 27 + apis/docs/v1/doc.tmpl | 66 + apis/docs/v1/docs.md | 26 +- apis/docs/v1/filter.md | 1552 +++++++ apis/docs/v1/flush.md | 68 + apis/docs/v1/index.md | 473 +++ apis/docs/v1/insert.md | 420 ++ apis/docs/v1/mirror.md | 87 + apis/docs/v1/object.md | 430 ++ apis/docs/v1/payload.md.tmpl | 2626 ++++++++++++ apis/docs/v1/payload.tmpl | 100 + apis/docs/v1/remove.md | 446 ++ apis/docs/v1/search.md | 1888 +++++++++ apis/docs/v1/update.md | 499 +++ apis/docs/v1/upsert.md | 436 ++ apis/grpc/v1/agent/core/agent.pb.go | 2 +- apis/grpc/v1/agent/sidecar/sidecar.pb.go | 2 +- .../v1/agent/sidecar/sidecar_vtproto.pb.go | 2 +- apis/grpc/v1/discoverer/discoverer.pb.go | 2 +- .../grpc/v1/filter/egress/egress_filter.pb.go | 2 +- .../v1/filter/ingress/ingress_filter.pb.go | 2 +- apis/grpc/v1/meta/meta.pb.go | 2 +- apis/grpc/v1/mirror/mirror.pb.go | 2 +- apis/grpc/v1/payload/payload.pb.go | 2825 +++++++------ apis/grpc/v1/payload/payload.pb.json.go | 10 + apis/grpc/v1/payload/payload_vtproto.pb.go | 243 ++ .../v1/rpc/errdetails/error_details.pb.go | 155 +- apis/grpc/v1/vald/filter.pb.go | 2 +- apis/grpc/v1/vald/flush.pb.go | 2 +- apis/grpc/v1/vald/index.pb.go | 2 +- apis/grpc/v1/vald/insert.pb.go | 2 +- apis/grpc/v1/vald/object.pb.go | 2 +- apis/grpc/v1/vald/remove.pb.go | 2 +- apis/grpc/v1/vald/search.pb.go | 2 +- apis/grpc/v1/vald/update.pb.go | 48 +- apis/grpc/v1/vald/update_vtproto.pb.go | 45 + apis/grpc/v1/vald/upsert.pb.go | 2 +- apis/grpc/v1/vald/vald.go | 1 + apis/proto/v1/mirror/mirror.proto | 14 +- apis/proto/v1/vald/filter.proto | 162 +- apis/proto/v1/vald/flush.proto | 14 +- apis/proto/v1/vald/index.proto | 8 + apis/proto/v1/vald/insert.proto | 85 +- apis/proto/v1/vald/object.proto | 91 +- apis/proto/v1/vald/remove.proto | 114 +- apis/proto/v1/vald/search.proto | 336 +- apis/proto/v1/vald/update.proto | 107 +- apis/proto/v1/vald/upsert.proto | 89 +- apis/swagger/v1/vald/update.swagger.json | 51 + buf.gen.yaml | 43 +- buf.lock | 9 + charts/vald-benchmark-operator/Chart.yaml | 2 +- charts/vald-benchmark-operator/README.md | 8 +- .../crds/valdbenchmarkjob.yaml | 177 +- .../crds/valdbenchmarkoperatorrelease.yaml | 136 +- .../crds/valdbenchmarkscenario.yaml | 24 +- .../schemas/job-values.schema.json | 5 +- .../schemas/job-values.yaml | 4 +- .../templates/deployment.yaml | 2 +- .../values.schema.json | 15 +- charts/vald-benchmark-operator/values.yaml | 10 +- charts/vald-helm-operator/Chart.yaml | 2 +- charts/vald-helm-operator/README.md | 10 +- .../crds/valdhelmoperatorrelease.yaml | 32 +- 
.../vald-helm-operator/crds/valdrelease.yaml | 3650 ++++++++++------- charts/vald-helm-operator/values.yaml | 2 +- charts/vald-readreplica/Chart.yaml | 2 +- charts/vald-readreplica/README.md | 2 +- charts/vald/Chart.yaml | 2 +- charts/vald/README.md | 2082 +++++----- charts/vald/crds/valdmirrortarget.yaml | 8 +- charts/vald/templates/_helpers.tpl | 13 + charts/vald/templates/agent/pdb.yaml | 1 + charts/vald/templates/discoverer/pdb.yaml | 1 + charts/vald/templates/gateway/filter/pdb.yaml | 1 + charts/vald/templates/gateway/ing.yaml | 193 +- charts/vald/templates/gateway/lb/pdb.yaml | 1 + charts/vald/templates/gateway/mirror/pdb.yaml | 1 + charts/vald/templates/manager/index/pdb.yaml | 1 + charts/vald/values.schema.json | 1290 +++++- charts/vald/values.yaml | 180 +- dockers/agent/core/agent/Dockerfile | 15 +- dockers/agent/core/faiss/Dockerfile | 15 +- dockers/agent/core/ngt/Dockerfile | 15 +- dockers/agent/sidecar/Dockerfile | 11 +- dockers/binfmt/Dockerfile | 1 + dockers/buildbase/Dockerfile | 1 + dockers/buildkit/Dockerfile | 1 + dockers/buildkit/syft/scanner/Dockerfile | 1 + dockers/ci/base/Dockerfile | 29 +- dockers/dev/Dockerfile | 39 +- dockers/discoverer/k8s/Dockerfile | 11 +- dockers/gateway/filter/Dockerfile | 11 +- dockers/gateway/lb/Dockerfile | 11 +- dockers/gateway/mirror/Dockerfile | 11 +- dockers/index/job/correction/Dockerfile | 11 +- dockers/index/job/creation/Dockerfile | 11 +- .../index/job/readreplica/rotate/Dockerfile | 11 +- dockers/index/job/save/Dockerfile | 11 +- dockers/index/operator/Dockerfile | 11 +- dockers/manager/index/Dockerfile | 11 +- dockers/operator/helm/Dockerfile | 11 +- dockers/tools/benchmark/job/Dockerfile | 15 +- dockers/tools/benchmark/operator/Dockerfile | 11 +- dockers/tools/cli/loadtest/Dockerfile | 15 +- docs/api/insert.md | 6 +- docs/api/object.md | 6 +- docs/api/remove.md | 8 +- docs/api/search.md | 24 +- docs/api/update.md | 6 +- docs/api/upsert.md | 6 +- docs/contributing/coding-style.md | 2 +- docs/contributing/unit-test-guideline.md | 2 +- docs/overview/about-vald.md | 2 +- docs/performance/continuous-benchmark.md | 36 +- docs/troubleshooting/client-side.md | 2 +- docs/tutorial/get-started-with-faiss-agent.md | 4 +- docs/tutorial/get-started.md | 2 +- docs/tutorial/vald-agent-standalone-on-k8s.md | 2 +- docs/tutorial/vald-multicluster-on-k8s.md | 10 +- docs/user-guides/client-api-config.md | 10 +- docs/user-guides/cluster-role-binding.md | 2 +- docs/user-guides/filtering-configuration.md | 2 +- .../observability-configuration.md | 2 +- docs/user-guides/sdks.md | 1 - example/client/agent/main.go | 2 +- example/client/go.mod | 42 +- example/client/go.mod.default | 4 +- example/client/go.sum | 58 +- example/client/main.go | 2 +- example/client/mirror/main.go | 2 +- example/helm/values.yaml | 2 +- go.mod | 405 +- go.sum | 735 ++-- hack/docker/gen/main.go | 788 +++- hack/git/hooks/pre-commit | 1 + hack/go.mod.default | 26 +- hack/helm/schema/crd/main.go | 2 +- hack/license/gen/main.go | 18 +- internal/backoff/backoff.go | 32 +- internal/backoff/backoff_test.go | 18 +- internal/cache/cache.go | 2 +- internal/cache/gache/gache.go | 2 +- internal/cache/gache/gache_test.go | 12 +- internal/cache/gache/option.go | 2 +- internal/cache/gache/option_test.go | 22 +- internal/cache/option.go | 6 +- internal/cache/option_test.go | 6 +- internal/circuitbreaker/breaker.go | 4 +- internal/circuitbreaker/breaker_test.go | 54 +- internal/circuitbreaker/options.go | 2 +- .../client/v1/client/agent/core/client.go | 6 +- 
.../client/v1/client/discoverer/discover.go | 134 +- internal/client/v1/client/vald/vald.go | 34 + internal/client/v1/client/vald/vald_test.go | 440 ++ internal/compress/gob_test.go | 6 +- internal/compress/lz4_test.go | 8 +- internal/config/cassandra_test.go | 2 +- internal/config/faiss_test.go | 4 + internal/config/grpc.go | 87 +- internal/config/grpc_test.go | 12 +- internal/config/index_deleter.go | 61 + internal/config/log.go | 2 +- internal/config/net.go | 2 + internal/config/server.go | 48 +- internal/config/server_test.go | 16 +- internal/core/algorithm/faiss/faiss.go | 2 +- internal/core/algorithm/faiss/option.go | 3 +- internal/core/algorithm/ngt/ngt.go | 131 +- internal/core/algorithm/ngt/ngt_test.go | 111 +- internal/db/rdb/mysql/dbr/dbr.go | 10 +- internal/db/rdb/mysql/dbr/insert.go | 4 +- internal/db/rdb/mysql/dbr/session.go | 6 +- internal/db/rdb/mysql/dbr/tx.go | 4 +- internal/db/rdb/mysql/mysql_test.go | 22 +- internal/db/rdb/mysql/option.go | 4 +- .../db/storage/blob/cloudstorage/option.go | 2 +- internal/db/storage/blob/s3/reader/option.go | 2 +- internal/db/storage/blob/s3/s3_test.go | 2 +- .../storage/blob/s3/session/session_test.go | 4 +- internal/errors/agent.go | 46 + internal/errors/agent_test.go | 1405 ++++++- internal/errors/corrector.go | 5 +- internal/errors/errors.go | 19 +- internal/errors/grpc.go | 2 +- internal/errors/net.go | 8 +- internal/errors/ngt.go | 29 - internal/errors/ngt_test.go | 1405 ------- internal/errors/option_test.go | 10 +- internal/errors/redis.go | 2 +- internal/errors/redis_test.go | 36 +- internal/errors/tls.go | 4 +- internal/errors/usearch_test.go | 189 + internal/errors/vald.go | 2 +- internal/file/file.go | 26 +- internal/info/info.go | 66 +- internal/k8s/job/job.go | 4 +- internal/log/level/level.go | 52 +- internal/log/option_test.go | 6 +- internal/net/dialer.go | 83 +- internal/net/dialer_test.go | 13 +- internal/net/grpc/client.go | 67 +- internal/net/grpc/client_test.go | 1748 ++++---- internal/net/grpc/codes/codes.go | 45 + internal/net/grpc/codes/codes_test.go | 101 + internal/net/grpc/errdetails/errdetails.go | 147 +- .../net/grpc/errdetails/errdetails_test.go | 96 + internal/net/grpc/health/health.go | 15 +- internal/net/grpc/health/health_test.go | 11 +- .../grpc/interceptor/client/metric/metric.go | 4 +- .../interceptor/server/logging/accesslog.go | 35 +- .../server/logging/accesslog_test.go | 108 + .../grpc/interceptor/server/metric/metric.go | 4 +- internal/net/grpc/option.go | 328 +- internal/net/grpc/pool/pool.go | 89 +- internal/net/grpc/pool/pool_bench_test.go | 4 +- internal/net/grpc/server.go | 64 + internal/net/grpc/status/status.go | 618 ++- internal/net/grpc/status/status_test.go | 343 ++ internal/net/http/json/json.go | 2 +- internal/net/http/json/json_test.go | 14 +- internal/net/net.go | 35 +- internal/net/net_test.go | 2 +- internal/observability/exporter/otlp/otlp.go | 2 +- internal/observability/metrics/mem/mem.go | 2 +- internal/observability/trace/status.go | 2 +- internal/params/params_test.go | 4 +- internal/safety/safety.go | 9 +- internal/servers/server/option.go | 44 +- internal/servers/server/option_test.go | 805 +++- internal/servers/server/server.go | 71 +- internal/servers/servers.go | 15 +- internal/servers/servers_test.go | 8 +- internal/strings/strings.go | 1 - .../sync/semaphore/semaphore_bench_test.go | 3 - internal/sync/singleflight/singleflight.go | 30 +- .../sync/singleflight/singleflight_test.go | 52 +- internal/test/data/vector/gen_test.go | 18 +- 
internal/test/mock/grpc/grpc_client_mock.go | 14 +- internal/test/mock/grpc_testify_mock.go | 2 + internal/tls/tls.go | 2 +- internal/worker/queue.go | 6 +- internal/worker/queue_option.go | 2 +- k8s/agent/ngt/configmap.yaml | 9 +- k8s/agent/pdb.yaml | 5 +- k8s/agent/priorityclass.yaml | 4 +- k8s/agent/statefulset.yaml | 4 +- k8s/agent/svc.yaml | 4 +- k8s/discoverer/clusterrole.yaml | 4 +- k8s/discoverer/clusterrolebinding.yaml | 4 +- k8s/discoverer/configmap.yaml | 10 +- k8s/discoverer/deployment.yaml | 6 +- k8s/discoverer/pdb.yaml | 5 +- k8s/discoverer/priorityclass.yaml | 4 +- k8s/discoverer/serviceaccount.yaml | 4 +- k8s/discoverer/svc.yaml | 4 +- k8s/gateway/gateway/ing.yaml | 21 +- k8s/gateway/gateway/lb/configmap.yaml | 27 +- k8s/gateway/gateway/lb/deployment.yaml | 8 +- k8s/gateway/gateway/lb/hpa.yaml | 4 +- k8s/gateway/gateway/lb/pdb.yaml | 5 +- k8s/gateway/gateway/lb/priorityclass.yaml | 4 +- k8s/gateway/gateway/lb/svc.yaml | 4 +- k8s/index/job/correction/configmap.yaml | 28 +- k8s/index/job/correction/cronjob.yaml | 19 +- k8s/index/job/creation/configmap.yaml | 19 +- k8s/index/job/creation/cronjob.yaml | 19 +- k8s/index/job/save/configmap.yaml | 19 +- k8s/index/job/save/cronjob.yaml | 19 +- k8s/index/operator/configmap.yaml | 6 +- k8s/index/operator/deployment.yaml | 6 +- k8s/index/operator/priorityclass.yaml | 4 +- k8s/manager/index/configmap.yaml | 19 +- k8s/manager/index/deployment.yaml | 8 +- k8s/manager/index/pdb.yaml | 5 +- k8s/manager/index/priorityclass.yaml | 4 +- k8s/manager/index/svc.yaml | 4 +- k8s/metrics/loki/loki.yaml | 2 +- k8s/metrics/loki/promtail.yaml | 6 +- k8s/metrics/profefe/cronjob.yaml | 2 +- k8s/metrics/pyroscope/base/deployment.yaml | 2 +- k8s/metrics/tempo/tempo.yaml | 4 +- .../helm/crds/valdhelmoperatorrelease.yaml | 32 +- k8s/operator/helm/crds/valdrelease.yaml | 3650 ++++++++++------- k8s/operator/helm/operator.yaml | 6 +- k8s/operator/helm/svc.yaml | 4 +- k8s/readreplica/configmap.yaml | 29 +- k8s/tools/benchmark/operator/configmap.yaml | 6 +- .../operator/crds/valdbenchmarkjob.yaml | 177 +- .../crds/valdbenchmarkoperatorrelease.yaml | 136 +- .../operator/crds/valdbenchmarkscenario.yaml | 24 +- k8s/tools/benchmark/operator/deployment.yaml | 6 +- k8s/tools/benchmark/operator/service.yaml | 4 +- pkg/agent/core/faiss/handler/grpc/option.go | 2 +- pkg/agent/core/faiss/service/faiss.go | 113 +- pkg/agent/core/faiss/service/faiss_test.go | 1227 +++++- pkg/agent/core/ngt/handler/grpc/index.go | 6 + pkg/agent/core/ngt/handler/grpc/insert.go | 12 +- .../core/ngt/handler/grpc/linear_search.go | 90 +- pkg/agent/core/ngt/handler/grpc/object.go | 12 +- .../core/ngt/handler/grpc/object_test.go | 2 +- pkg/agent/core/ngt/handler/grpc/option.go | 2 +- pkg/agent/core/ngt/handler/grpc/remove.go | 36 +- pkg/agent/core/ngt/handler/grpc/search.go | 90 +- pkg/agent/core/ngt/handler/grpc/update.go | 110 +- .../core/ngt/handler/grpc/update_test.go | 129 + pkg/agent/core/ngt/handler/grpc/upsert.go | 29 +- pkg/agent/core/ngt/service/ngt.go | 437 +- pkg/agent/core/ngt/service/ngt_test.go | 2248 +++++++--- pkg/agent/internal/kvs/kvs.go | 10 +- pkg/agent/internal/kvs/kvs_test.go | 32 +- pkg/agent/internal/vqueue/queue.go | 227 +- pkg/agent/internal/vqueue/queue_test.go | 1035 ++++- pkg/agent/internal/vqueue/stateful_test.go | 12 +- pkg/discoverer/k8s/handler/grpc/option.go | 3 +- pkg/discoverer/k8s/service/discover.go | 78 +- pkg/gateway/filter/handler/grpc/handler.go | 500 +-- pkg/gateway/filter/handler/grpc/option.go | 2 +- .../internal/location/location_test.go | 14 - 
pkg/gateway/lb/handler/grpc/aggregation.go | 38 +- pkg/gateway/lb/handler/grpc/handler.go | 973 +++-- pkg/gateway/lb/handler/grpc/handler_test.go | 274 ++ pkg/gateway/lb/handler/grpc/option.go | 2 +- pkg/gateway/lb/service/gateway.go | 7 +- pkg/gateway/mirror/handler/grpc/handler.go | 502 +-- pkg/gateway/mirror/handler/grpc/option.go | 2 +- pkg/gateway/mirror/service/gateway.go | 22 +- pkg/gateway/mirror/service/gateway_test.go | 4 +- pkg/gateway/mirror/service/mirror.go | 67 +- pkg/gateway/mirror/service/mirror_option.go | 4 +- pkg/gateway/mirror/service/mirror_test.go | 150 +- pkg/gateway/mirror/usecase/vald.go | 10 +- pkg/index/job/correction/service/corrector.go | 790 ++-- .../job/correction/service/corrector_test.go | 781 ++++ pkg/index/job/correction/usecase/corrector.go | 9 +- pkg/index/job/creation/service/indexer.go | 2 +- pkg/index/job/creation/usecase/creation.go | 2 +- .../job/readreplica/rotate/service/rotator.go | 6 +- .../job/readreplica/rotate/usecase/rotate.go | 2 +- pkg/index/job/save/service/indexer.go | 2 +- pkg/index/job/save/service/indexer_test.go | 2 +- pkg/index/job/save/usecase/save.go | 2 +- pkg/index/operator/service/operator.go | 6 +- pkg/index/operator/usecase/operator.go | 2 +- pkg/manager/index/service/indexer.go | 14 +- pkg/manager/index/service/indexer_test.go | 18 +- pkg/tools/benchmark/job/config/config.go | 2 +- .../benchmark/operator/service/operator.go | 4 +- .../operator/service/operator_test.go | 2 +- rust/Cargo.lock | 2923 +++++++++++-- rust/Cargo.toml | 7 +- rust/bin/agent/Cargo.toml | 12 +- rust/bin/agent/src/handler.rs | 31 +- rust/bin/agent/src/handler/index.rs | 18 +- rust/bin/agent/src/handler/insert.rs | 6 +- rust/bin/agent/src/handler/remove.rs | 8 +- rust/bin/agent/src/handler/search.rs | 100 +- rust/bin/agent/src/handler/update.rs | 14 +- rust/bin/agent/src/handler/upsert.rs | 6 +- rust/bin/agent/src/main.rs | 24 +- rust/libs/algorithm/Cargo.toml | 4 + rust/libs/algorithm/src/lib.rs | 37 +- rust/libs/algorithms/ngt/Cargo.toml | 6 +- rust/libs/proto/Cargo.toml | 6 +- rust/libs/proto/src/lib.rs | 1 + rust/libs/proto/src/payload.v1.rs | 402 +- rust/libs/proto/src/rpc.v1.rs | 56 + rust/libs/proto/src/vald.v1.tonic.rs | 88 + rust/rust-toolchain | 2 +- rust/rust-toolchain.toml | 2 +- tests/chaos/chart/README.md | 2 +- tests/e2e/crud/crud_faiss_test.go | 4 +- tests/e2e/crud/crud_test.go | 60 +- tests/e2e/operation/multi.go | 16 +- tests/e2e/operation/operation.go | 31 +- tests/e2e/operation/stream.go | 38 +- tests/e2e/performance/max_vector_dim_test.go | 5 +- .../agent/core/ngt/service/ngt_e2s_test.go | 4 +- tests/performance/max_vector_dim_test.go | 14 +- versions/BUF_VERSION | 2 +- versions/CHAOS_MESH_VERSION | 2 +- versions/CMAKE_VERSION | 2 +- versions/DOCKER_VERSION | 2 +- versions/FAISS_VERSION | 2 +- versions/GOLANGCILINT_VERSION | 2 +- versions/GO_VERSION | 2 +- versions/HDF5_VERSION | 2 +- versions/HELM_VERSION | 2 +- versions/K3S_VERSION | 2 +- versions/KIND_VERSION | 2 +- versions/KUBECTL_VERSION | 2 +- versions/KUBELINTER_VERSION | 2 +- versions/NGT_VERSION | 2 +- versions/OPERATOR_SDK_VERSION | 2 +- versions/PROMETHEUS_STACK_VERSION | 2 +- versions/PROTOBUF_VERSION | 2 +- versions/REVIEWDOG_VERSION | 2 +- versions/RUST_VERSION | 2 +- versions/TELEPRESENCE_VERSION | 2 +- versions/USEARCH_VERSION | 1 + versions/VALD_VERSION | 2 +- versions/YQ_VERSION | 2 +- versions/actions/ACTIONS_CACHE | 2 +- versions/actions/ACTIONS_CHECKOUT | 2 +- versions/actions/ACTIONS_SETUP_GO | 2 +- versions/actions/ACTIONS_SETUP_NODE | 2 +- 
versions/actions/ACTIONS_UPLOAD_ARTIFACT | 2 +- versions/actions/CODECOV_CODECOV_ACTION | 2 +- .../actions/CRAZY_MAX_GHACTION_IMPORT_GPG | 2 +- versions/actions/DOCKER_SETUP_BUILDX_ACTION | 2 +- versions/actions/GITHUB_CODEQL_ACTION_ANALYZE | 2 +- .../actions/GITHUB_CODEQL_ACTION_AUTOBUILD | 2 +- versions/actions/GITHUB_CODEQL_ACTION_INIT | 2 +- .../actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF | 2 +- versions/actions/GITHUB_ISSUE_METRICS | 2 +- .../PETER_EVANS_CREATE_ISSUE_FROM_FILE | 2 +- .../actions/PETER_EVANS_CREATE_PULL_REQUEST | 2 +- versions/actions/REVIEWDOG_ACTION_HADOLINT | 2 +- .../actions/REVIEWDOG_ACTION_LANGUAGETOOL | 2 +- .../SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET | 2 +- versions/actions/SOFTPROPS_ACTION_GH_RELEASE | 2 +- 522 files changed, 47096 insertions(+), 16779 deletions(-) delete mode 100644 .github/dependabot.yml delete mode 100644 .github/labeler.yml delete mode 100644 .github/workflows/_detect-ci-container.yml delete mode 100644 .github/workflows/_release-pr.yml create mode 100644 .github/workflows/dockers-agent-faiss-image.yaml delete mode 100644 .github/workflows/dockers-agent-faiss-image.yml create mode 100644 .github/workflows/dockers-agent-image.yaml delete mode 100644 .github/workflows/dockers-agent-image.yml create mode 100644 .github/workflows/dockers-agent-ngt-image.yaml delete mode 100644 .github/workflows/dockers-agent-ngt-image.yml create mode 100644 .github/workflows/dockers-agent-sidecar-image.yaml delete mode 100644 .github/workflows/dockers-agent-sidecar-image.yml create mode 100644 .github/workflows/dockers-benchmark-job-image.yaml delete mode 100644 .github/workflows/dockers-benchmark-job-image.yml delete mode 100644 .github/workflows/dockers-buildbase-image.yml delete mode 100644 .github/workflows/dockers-ci-container-image.yml delete mode 100644 .github/workflows/dockers-dev-container-image.yml create mode 100644 .github/workflows/dockers-discoverer-k8s-image.yaml delete mode 100644 .github/workflows/dockers-discoverer-k8s-image.yml create mode 100644 .github/workflows/dockers-example-client-image.yaml create mode 100644 .github/workflows/dockers-gateway-filter-image.yaml delete mode 100644 .github/workflows/dockers-gateway-filter-image.yml create mode 100644 .github/workflows/dockers-gateway-lb-image.yaml delete mode 100644 .github/workflows/dockers-gateway-lb-image.yml create mode 100644 .github/workflows/dockers-helm-operator-image.yaml delete mode 100644 .github/workflows/dockers-helm-operator-image.yml delete mode 100644 .github/workflows/dockers-image-scan.yml create mode 100644 .github/workflows/dockers-index-correction-image.yaml delete mode 100644 .github/workflows/dockers-index-correction.yml create mode 100644 .github/workflows/dockers-index-creation-image.yaml delete mode 100644 .github/workflows/dockers-index-creation.yml create mode 100644 .github/workflows/dockers-index-deletion-image.yaml create mode 100644 .github/workflows/dockers-index-operator-image.yaml delete mode 100644 .github/workflows/dockers-index-operator.yml create mode 100644 .github/workflows/dockers-index-save-image.yaml delete mode 100644 .github/workflows/dockers-index-save.yml create mode 100644 .github/workflows/dockers-loadtest-image.yaml delete mode 100644 .github/workflows/dockers-loadtest-image.yml create mode 100644 .github/workflows/dockers-manager-index-image.yaml delete mode 100644 .github/workflows/dockers-manager-index-image.yml create mode 100644 .github/workflows/dockers-readreplica-rotate-image.yaml delete mode 100644 
.github/workflows/dockers-readreplica-rotate.yml delete mode 100644 .github/workflows/e2e-max-dim.yml delete mode 100644 .github/workflows/e2e-profiling.yml delete mode 100644 .github/workflows/e2e.yml create mode 100644 .github/workflows/unit-test-rust.yaml create mode 100644 apis/docs/buf.gen.doc.yaml create mode 100644 apis/docs/buf.gen.payload.yaml create mode 100644 apis/docs/buf.gen.tmpl.yaml create mode 100644 apis/docs/v1/doc.tmpl create mode 100644 apis/docs/v1/filter.md create mode 100644 apis/docs/v1/flush.md create mode 100644 apis/docs/v1/index.md create mode 100644 apis/docs/v1/insert.md create mode 100644 apis/docs/v1/mirror.md create mode 100644 apis/docs/v1/object.md create mode 100644 apis/docs/v1/payload.md.tmpl create mode 100644 apis/docs/v1/payload.tmpl create mode 100644 apis/docs/v1/remove.md create mode 100644 apis/docs/v1/search.md create mode 100644 apis/docs/v1/update.md create mode 100644 apis/docs/v1/upsert.md create mode 100644 buf.lock create mode 100644 internal/config/index_deleter.go create mode 100644 internal/errors/usearch_test.go create mode 100644 internal/net/grpc/codes/codes_test.go create mode 100644 versions/USEARCH_VERSION diff --git a/.all-contributorsrc b/.all-contributorsrc index a750612cb3..48fc076f91 100644 --- a/.all-contributorsrc +++ b/.all-contributorsrc @@ -194,6 +194,36 @@ "contributions": [ "doc" ] + }, + { + "login": "smorihira", + "name": "Shunya Morihira (森平 隼矢)", + "avatar_url": "https://avatars.githubusercontent.com/u/105629359?v=4", + "profile": "https://github.com/smorihira", + "contributions": [ + "tool", + "code" + ] + }, + { + "login": "iammytoo", + "name": "miyamoto", + "avatar_url": "https://avatars.githubusercontent.com/u/64457274?v=4", + "profile": "https://github.com/iammytoo", + "contributions": [ + "code", + "research" + ] + }, + { + "login": "highpon", + "name": "s-shiraki", + "avatar_url": "https://avatars.githubusercontent.com/u/54130718?v=4", + "profile": "https://www.highpon.com/", + "contributions": [ + "test", + "code" + ] } ], "contributorsPerLine": 7, diff --git a/.cspell.json b/.cspell.json index 71ced17ccf..4c77f84aa5 100644 --- a/.cspell.json +++ b/.cspell.json @@ -1,226 +1,2745 @@ { "version": "0.2", "language": "en", - "words": [ - "ACCESSS", + "import": [ + "@cspell/dict-cpp/cspell-ext.json", + "@cspell/dict-docker/cspell-ext.json", + "@cspell/dict-en_us/cspell-ext.json", + "@cspell/dict-fullstack/cspell-ext.json", + "@cspell/dict-git/cspell-ext.json", + "@cspell/dict-golang/cspell-ext.json", + "@cspell/dict-k8s/cspell-ext.json", + "@cspell/dict-makefile/cspell-ext.json", + "@cspell/dict-markdown/cspell-ext.json", + "@cspell/dict-npm/cspell-ext.json", + "@cspell/dict-public-licenses/cspell-ext.json", + "@cspell/dict-rust/cspell-ext.json", + "@cspell/dict-shell/cspell-ext.json" + ], + "ignorePaths": [ + "**/*.ai", + "**/*.drawio", + "**/*.hdf5", + "**/*.key", + "**/*.lock", + "**/*.log", + "**/*.md5", + "**/*.pack", + "**/*.pdf", + "**/*.pem", + "**/*.png", + "**/*.sum", + "**/*.svg", + "**/.cspell.json", + "**/.git/objects/**", + "**/cmd/agent/core/faiss/faiss", + "**/cmd/agent/core/ngt/ngt", + "**/cmd/agent/sidecar/sidecar", + "**/cmd/discoverer/k8s/discoverer", + "**/cmd/gateway/filter/filter", + "**/cmd/gateway/lb/lb", + "**/cmd/gateway/mirror/mirror", + "**/cmd/index/job/correction/index-correction", + "**/cmd/index/job/creation/index-creation", + "**/cmd/index/job/deletion/index-deletion", + "**/cmd/index/job/readreplica/rotate/readreplica-rotate", + "**/cmd/index/job/save/index-save", + 
"**/cmd/index/operator/index-operator", + "**/cmd/manager/index/index", + "**/cmd/tools/benchmark/job/job", + "**/cmd/tools/benchmark/operator/operator", + "**/cmd/tools/cli/loadtest/loadtest", + "**/hack/cspell/**", + "**/internal/core/algorithm/ngt/assets/index", + "**/internal/test/data/agent/ngt/validIndex" + ], + "patterns": [ + { + "name": "Ignore_addr_suffix", + "pattern": "/\\b\\w*addr\\b/" + }, + { + "name": "Ignore_addrs_suffix", + "pattern": "/\\b\\w*addrs\\b/" + }, + { + "name": "Ignore_buf_suffix", + "pattern": "/\\b\\w*buf\\b/" + }, + { + "name": "Ignore_cancel_suffix", + "pattern": "/\\b\\w*cancel\\b/" + }, + { + "name": "Ignore_cfg_suffix", + "pattern": "/\\b\\w*cfg\\b/" + }, + { + "name": "Ignore_ch_suffix", + "pattern": "/\\b\\w*ch\\b/" + }, + { + "name": "Ignore_cnt_suffix", + "pattern": "/\\b\\w*cnt\\b/" + }, + { + "name": "Ignore_conf_suffix", + "pattern": "/\\b\\w*conf\\b/" + }, + { + "name": "Ignore_conn_suffix", + "pattern": "/\\b\\w*conn\\b/" + }, + { + "name": "Ignore_ctx_suffix", + "pattern": "/\\b\\w*ctx\\b/" + }, + { + "name": "Ignore_dim_suffix", + "pattern": "/\\b\\w*dim\\b/" + }, + { + "name": "Ignore_dur_suffix", + "pattern": "/\\b\\w*dur\\b/" + }, + { + "name": "Ignore_env_suffix", + "pattern": "/\\b\\w*env\\b/" + }, + { + "name": "Ignore_err_suffix", + "pattern": "/\\b\\w*err\\b/" + }, + { + "name": "Ignore_error_suffix", + "pattern": "/\\b\\w*error\\b/" + }, + { + "name": "Ignore_errors_suffix", + "pattern": "/\\b\\w*errors\\b/" + }, + { + "name": "Ignore_errs_suffix", + "pattern": "/\\b\\w*errs\\b/" + }, + { + "name": "Ignore_idx_suffix", + "pattern": "/\\b\\w*idx\\b/" + }, + { + "name": "Ignore_len_suffix", + "pattern": "/\\b\\w*len\\b/" + }, + { + "name": "Ignore_mu_suffix", + "pattern": "/\\b\\w*mu\\b/" + }, + { + "name": "Ignore_opt_suffix", + "pattern": "/\\b\\w*opt\\b/" + }, + { + "name": "Ignore_opts_suffix", + "pattern": "/\\b\\w*opts\\b/" + }, + { + "name": "Ignore_pool_suffix", + "pattern": "/\\b\\w*pool\\b/" + }, + { + "name": "Ignore_req_suffix", + "pattern": "/\\b\\w*req\\b/" + }, + { + "name": "Ignore_res_suffix", + "pattern": "/\\b\\w*res\\b/" + }, + { + "name": "Ignore_size_suffix", + "pattern": "/\\b\\w*size\\b/" + }, + { + "name": "Ignore_vec_suffix", + "pattern": "/\\b\\w*vec\\b/" + } + ], + "ignoreRegExpList": [ + "Ignore_addr_suffix", + "Ignore_addrs_suffix", + "Ignore_buf_suffix", + "Ignore_cancel_suffix", + "Ignore_cfg_suffix", + "Ignore_ch_suffix", + "Ignore_cnt_suffix", + "Ignore_conf_suffix", + "Ignore_conn_suffix", + "Ignore_ctx_suffix", + "Ignore_dim_suffix", + "Ignore_dur_suffix", + "Ignore_env_suffix", + "Ignore_err_suffix", + "Ignore_error_suffix", + "Ignore_errors_suffix", + "Ignore_errs_suffix", + "Ignore_idx_suffix", + "Ignore_len_suffix", + "Ignore_mu_suffix", + "Ignore_opt_suffix", + "Ignore_opts_suffix", + "Ignore_pool_suffix", + "Ignore_req_suffix", + "Ignore_res_suffix", + "Ignore_size_suffix", + "Ignore_vec_suffix" + ], + "ignoreWords": [ "AQUASECURITY", - "AUTOBUILD", + "Addrs", + "Atof", + "Atol", + "Autoscaler", + "BINFMT", + "BUILDBASE", + "BUILDKIT", "BUILDX", - "Burstable", + "Bbolt", + "Buildx", + "CAPI", + "CHATOPS", "Capi", + "DISTROLESS", + "DNSA", + "Debugd", + "Debugf", + "Devcontainer", + "EUCJP", + "Errord", "Errorf", + "Eucjp", "FAISS", + "FASTOPEN", + "Faiss", + "Fatald", + "Fnum", + "GACHE", + "GETOBJECT", "GHACTION", + "GOARCH", + "GOBIN", "GOLANGCILINT", - "GOMAXPROCS", - "Godoc", + "GOLINES", + "GOPATH", + "GOPKG", + "GOPRIVATE", + "GOROOT", + "GOTEST", + "GOTESTS", + "Gache", + "Gocqlx", + 
"Gofumpt", + "Goleak", + "IDRPC", + "INITCONTAINER", + "Idxs", + "Iface", + "Indegree", + "Infod", "Infof", + "Inuse", + "Jaccard", + "KEEPIDLE", + "KEYSPACE", + "KLOG", + "KUBECONFIG", "KUBELINTER", + "KVSDB", + "Keyspace", + "Kvsdb", "LANGUAGETOOL", - "Milli", + "LDFLAGS", + "LOADTEST", + "LOGRUS", + "LOGRUs", + "MNIST", + "Mallocs", + "Mirr", + "Nbits", + "Nocie", "ONNX", - "OTEL", - "PROTOBUF", + "Oneof", + "Outdegree", + "Outf", + "PORTFORWARD", + "Portforward", + "Prost", + "Ptop", + "Pyroscope", + "QUICKACK", + "RDONLY", + "READREPLICA", + "RECVORIGDSTADDR", "REVIEWDOG", - "Roundtripper", + "ROOTDIR", + "RUSTUP", + "Readreplica", + "Rebalance", + "Regist", + "Represets", + "Retryable", + "Reviewdog", + "Rootdir", + "Ruleguard", "SARIF", - "SOFTPROPS", - "Structs", + "SYFT", + "Sjis", + "Stmts", + "Struct", + "Svcs", "TELEPRESENCE", + "TEXTLINT", + "TMPDIR", + "Tgts", + "Tolerations", + "UPSERT", + "Unmarshal", + "Upsert", "VALD", - "VALDCLI", + "VALDRELEASE", + "VECTORIZER", "VHOR", "Vald", + "Vald's", + "Vals", + "Vecs", "Vectorizer", + "Vqueue", + "Warnd", + "Warnf", + "Wrapf", + "ZEROLOG", + "ZSTD", + "Zstd", + "accesskey", "accesslog", - "achive", + "adal", "addrs", + "adipisicing", + "afero", + "aggr", + "ajstarks", + "aknishid", + "akrylysov", + "aliqua", + "aliquip", + "alives", + "amet", + "amqp", + "ando", + "antihax", + "anypb", + "apiextensions", + "apimachinery", "apiserver", - "attirbute", + "appengine", + "aquasecurity", + "armon", + "astcopy", + "astequal", + "atof", + "atol", + "atot", + "aute", + "autobuild", + "autoclean", + "automaxprocs", + "autoremove", + "autorest", + "autoscaler", + "azcore", + "azidentity", + "backoffmetrics", + "batchv", "bbolt", - "boudary", - "brandguidelines", + "bdbs", + "benbjohnson", + "benchjob", + "benchjobs", + "benchmem", + "benchscenario", + "bento", + "beorn", + "bigann", + "binfmt", + "bjns", + "blackfriday", + "bmizerany", + "boolint", + "boombuler", + "brnd", + "buckhash", + "bufbuild", + "buger", "buildbase", + "buildkit", + "buildx", + "bulkinsert", + "burstable", + "bvecs", + "bytefmt", + "bzrignore", + "canceld", + "capi", + "cbmetrics", + "cenkalti", + "cespare", "chatops", - "chrono", + "chunkreader", + "chzyer", + "cillum", "circuitbreaker", + "clientip", + "clientmock", + "clientset", + "cloudfoundry", + "cloudsql", "cloudstorage", "clusterrole", "clusterrolebinding", + "clusterrolebindings", + "clusterroles", + "cmder", + "cmdflag", + "cmps", + "cnts", + "cockroachdb", + "codegen", + "collatz", + "colorstring", + "commandhistory", + "commodo", + "concurency", "configmap", + "configsources", + "conflint", + "consequat", + "consistetncy", "contributorsrc", "conv", + "copylocks", + "corev", + "cpuguy", + "cpuid", "crds", + "creack", + "createandsave", "crlfmt", + "crorg", + "cstring", "ctxio", + "cupidatat", + "customresourcedefinitions", + "cvspq", "daemonset", + "dataspace", + "datelier", + "davecgh", + "dbuild", + "dbus", + "dcmake", + "deafult", + "debg", + "debugd", + "debugf", + "decbytes", + "deepcopy", "deepsource", + "dejavu", + "deletecollection", + "deleter", + "demangle", + "denisenkom", + "deserunt", "devcontainer", + "devcontainers", "devel", + "devigned", + "dgryski", + "dicoverer", + "difflib", + "diskv", + "distroless", + "dnaeon", + "dnsa", + "dockerfiles", + "dolore", + "dotdc", + "dotproduct", + "dpkg", + "dset", + "duis", + "durationpb", + "dylib", + "easyjson", + "ecrud", + "eiusmod", + "elif", + "elit", + "emap", + "emicklei", + "enim", + "envkey", "envoyproxy", + "eoptions", + "errcheck", 
"errdetails", "errgroup", - "facebookresearch", + "errgroup", + "errord", + "errorf", + "errorln", + "esac", + "eucjp", + "evanphx", + "eventstream", + "excepteur", + "exhaustruct", + "extendee", + "extldflags", + "faild", "faiss", "fastime", + "fastopen", + "fastuuid", + "fatald", + "felixge", + "fgprof", + "finalizer", + "finalizers", + "firestore", + "fitos", + "flamegraph", + "flot", + "fmap", + "fname", + "fnum", + "fogleman", + "fopenmp", + "fortytw", + "fpdf", + "frankban", + "freelist", + "freetype", + "fsnotify", + "fugiat", "fuid", + "funakoshi", + "fvecs", "gache", + "gbackoff", + "gcsblob", + "genproto", "getobject", "getstarted", + "gfortran", + "ghaction", + "ghcrorg", "gitfiles", + "glfw", + "glog", + "goarch", + "goautoneg", + "gobc", + "gobin", + "gobwas", + "gocache", + "goccy", + "gocloud", + "gocql", + "gocqlx", + "gocraft", + "godbus", + "godebug", + "godeltaprof", + "godoc", + "gofpdf", + "gofpdi", + "gofrs", "gofumpt", + "gofuzz", + "gogrep", "goimports", + "gojsonpointer", + "gojsonreference", + "gojsonschema", "golangci", + "golangcilint", "goleak", "golines", - "gongt", + "gomaxprocs", + "gomega", + "gomnd", + "gomodifytags", + "gomodules", + "gonic", "gonum", + "gopath", + "gopkg", + "gopls", + "goprivate", + "goproxy", + "gopter", + "goroot", + "goroutines", "gorules", + "gostub", "gotest", + "gotestfmt", "gotests", "gotmpl", + "gotool", + "gover", + "govet", + "gpgsign", + "graphviz", + "gregjones", + "grimaud", + "groundtruth", + "groupcache", + "grpclog", + "grpcmock", + "grpcreplay", "hadolint", + "hailocab", + "hanwen", + "healthcheck", + "healthz", "helmignore", - "httputil", - "icfg", + "hgignore", + "hiroto", + "hlts", + "hoge", + "honnef", + "horizontalpodautoscalers", + "hostport", + "hrichik", + "hrichiksite", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "iancoleman", + "ianlancetaylor", + "iblob", + "iconfig", + "idelay", + "idrpc", + "idxs", + "iface", + "igmp", + "imds", + "incididunt", + "inconshreveable", + "indegree", + "indexmapkey", + "indexmapvalue", + "infod", + "infof", + "infoln", + "infometrics", + "ingester", "initcontainer", + "initdb", + "inmemory", "innerproduct", - "ioutil", + "insuffcient", + "inuse", + "irure", + "isatty", + "ivecs", "jaccard", - "japansearch", + "jackc", + "jaegertracing", + "jessevdk", + "jitted", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jrnlw", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "junsei", + "kadowaki", + "kato", + "katz", + "kbps", + "keepalive", + "keepalives", + "keepidle", + "kevindiu", + "keyspace", + "kiichiro", + "kisielk", + "klauspost", + "klog", + "kmrmt", + "koichi", "kosarak", + "kosuke", "kpango", + "kprofefe", + "kubeconfig", "kubelinter", - "kustomization", + "kubeval", "kvsdb", + "kvstore", + "kvvk", + "kylelemons", + "labelmap", + "labore", + "laboris", + "laborum", + "languagetool", + "ldconfig", + "ldflags", + "leaktest", + "leanovate", + "lenmapkey", + "lenmapvalue", + "leodido", + "lfaiss", + "libaec", "libhdf", + "liblapack", + "libomp", + "libopenblas", + "lifecycler", + "liggitt", + "liusy", + "livenesss", + "lngt", "loadbalancer", "loadtest", + "localserial", + "localtime", + "logex", + "logfmt", + "logr", + "logrus", + "ltsv", + "lucasb", "lycorp", + "mailru", + "mallocs", + "mapkey", + "mapvalue", + "mattn", + "matttproud", "maxprocs", - "minio", + "mazumder", + "mcache", + "memstats", + "mertics", + "metas", + "metav", + "metricinterceptor", + "mfridman", + "miette", + "minburst", + "mirr", + "misscheduled", + "mitchellh", + 
"mktemp", + "mktmp", "mnist", + "mnode", + "moby", + "modocache", + "mollit", + "monochromegane", + "montanaflynn", + "morimoto", + "mountinfo", + "mpod", + "mspan", + "mssqldb", + "mthe", "multiapis", "multicluster", - "nanos", + "munnerz", + "nang", "nbits", - "networkpolicy", + "ncos", + "neighors", + "networkpolicies", + "ngroup", + "ngtd", + "nhooyr", + "niemeyer", + "nindent", "nlist", + "nobic", + "nocie", + "nogce", + "nolint", + "noninteractive", + "nonroot", + "nopvq", "normalizedangle", "normalizedcosine", + "normalizedl", + "normang", + "normcos", + "norml", "nosql", + "nostrud", + "notests", + "npoints", + "ntotal", + "nulla", + "nvim", + "nvimlog", + "nxadm", "nytimes", + "objc", + "objs", + "objx", + "occaecat", + "ocsql", + "officia", + "okamura", "oneof", "onnx", - "otel", - "otlp", + "onsi", + "opencensus", + "opencontainers", + "osdk", + "ospace", + "otelgrpc", + "otlpmetric", + "otlpmetricgrpc", + "otlptrace", + "otlptracegrpc", + "outdegree", + "outf", + "pariatur", + "pbdocs", + "pbgos", + "peakrate", + "persistentvolumeclaims", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgroup", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pipefail", + "pipeliner", + "planetscale", + "pmezard", + "poddisruptionbudgets", + "podname", "pogreb", + "poinc", + "pointradius", "portforward", "pprof", - "priorityclass", + "prashantv", + "preriodically", + "priorityclasses", "profefe", + "progressbar", + "proident", "promtail", "prost", "protobuf", "protoc", + "protocolbuffers", + "protohelpers", + "protoimpl", + "protojson", + "protoreflect", "protos", + "protovalidate", + "pstartf", + "pstopf", + "ptop", "pyroscope", + "quantizer", + "quasilyte", + "queryx", + "quickack", + "quicktest", + "quis", + "ratelimit", + "rdonly", "readreplica", + "readyz", "rebalance", "rebalancing", + "recvorigdstaddr", + "regist", + "registerers", + "replayers", "replicasets", + "repr", + "reprehenderit", + "represets", "rerank", - "retrive", + "resered", + "retryable", "reviewdog", + "rgba", + "rintaro", + "rinx", + "roccd", + "rogpeppe", + "rolebindings", + "rootdir", + "roundtripper", + "rpcs", + "ruleguard", + "russross", "rustc", "rustup", - "serviceaccount", + "ruudk", + "sarif", + "saveindex", + "sbinet", + "schollz", + "scylladb", + "secretkey", + "secretmanager", + "semconv", + "sendemail", + "sergi", + "serversscheme", + "serviceaccounts", + "sess", + "sgroup", + "shiraishi", + "shlex", + "shmem", + "shogo", + "shopspring", + "shurcoo", + "signingkey", + "signoff", "singleflight", + "sint", + "sirupsen", + "siyuan", + "sjis", + "skipcq", + "smallscreen", + "snapshotter", + "snapshotv", + "softprops", + "sparsejaccard", + "spdystream", + "spjac", + "sptag", + "sqlexp", + "sqlmock", + "sqlx", + "srvs", + "sspan", + "stackdriver", + "starlark", "statefulset", - "steamsearch", + "statefulsets", + "staticcheck", + "stdinfo", + "stdr", + "stix", + "stmts", + "stockout", + "stoewer", + "storageclass", + "stos", + "strcase", "streaminsert", + "stretchr", + "strg", + "strictgoimports", + "strparse", + "struct", "structs", + "subquantizers", + "substr", "subtests", - "testdata", - "textlintrc", + "sunt", + "svcs", + "syft", + "tabwriter", + "tada", + "tagalign", + "taisuou", + "takuyaymd", + "tcql", + "technote", + "telepresence", + "tempor", + "testfunc", + "textlint", + "tgts", + "thedrow", + "threadcreate", + "timelimit", + "timepicker", "timeutil", + "tlsca", + "tmpdir", + "tmpfs", + "tmpl", + "tmproj", + "tolerations", + "tonistiigi", + "toolsmith", + 
"tparse", + "traceinterceptor", "traefik", + "treeprint", + "trunc", + "typeparams", + "tzdata", + "udpa", + "ugorji", + "ullamco", + "ultiple", + "unbackupped", + "unixgram", + "unixpacket", + "unmarshal", + "unparam", "upsert", "upserted", "urlopener", "usecase", + "usecases", + "userdefined", "vald", + "vald's", "valdbenchmarkjob", + "valdbenchmarkjobs", + "valdbenchmarkoperator", "valdbenchmarkoperatorrelease", + "valdbenchmarkoperatorreleases", "valdbenchmarkscenario", + "valdbenchmarkscenarios", + "valdchart", + "valdcli", "valdhelmoperatorrelease", - "valdhelmopratorreleases", + "valdhelmoperatorreleases", "valdmirrortarget", "valdmirrortargets", + "valdname", "valdrelease", + "valdreleases", + "vals", + "vankichi", + "vbjs", "vbor", + "vbors", + "vbss", "vdaas", "vdctl", + "vecs", "vectorizer", - "vectorizing", + "velit", + "veniam", + "versin", + "vfsgen", "vhor", + "vhors", + "vmap", + "vmdata", + "vmexe", + "vmhwm", + "vmlck", + "vmlib", + "vmpeak", + "vmpin", + "vmpte", + "vmrss", + "vmstk", + "vmswap", + "volumesnapshot", + "volumesnapshots", + "voluptate", + "voronoi", "vqueue", "vtproto", - "werr", + "warnd", + "warnf", + "warningf", + "warningln", + "wfci", "whitesource", + "wiretype", + "wlhf", + "workdir", + "worktree", + "wrapf", + "wrapperspb", + "xaxis", + "xeipuuv", + "xids", + "xlab", + "xxhash", + "xzvf", "yahoojapan", "yamlfmt", + "yaxes", + "yaxis", + "ykadowak", + "yukawa", + "yusuke", + "zapr", + "zchee", + "zdtd", + "zeebo", + "zerolog", + "zoneinfo", "zstd" ], - "ignoreWords": [ - "CMYK", - "SHOGO", - "TECHNOTE", - "agentd", - "benchmarkd", - "conflint", - "sidecard" - ], - "dictionaries": [ - "softwareTerms", - "misc", - "companies", - "typescript", - "node", - "html", - "css", - "fonts", - "filetypes", - "npm" - ], - "ignorePaths": [ - "**/*.ai", - "**/*.drawio", - "**/*.hdf5", - "**/*.key", - "**/*.lock", - "**/*.log", - "**/*.md5", - "**/*.pack", - "**/*.pdf", - "**/*.pem", - "**/*.png", - "**/*.sum", - "**/*.svg", - "**/.git/objects/**", - "**/cmd/agent/core/faiss/faiss", - "**/cmd/agent/core/ngt/ngt", - "**/cmd/agent/sidecar/sidecar", - "**/cmd/discoverer/k8s/k8s", - "**/cmd/gateway/filter/filter", - "**/cmd/gateway/lb/lb", - "**/cmd/gateway/mirror/mirror", - "**/cmd/index/job/correction/correction", - "**/cmd/index/job/creation/creation", - "**/cmd/index/job/readreplica/rotate/rotate", - "**/cmd/index/job/save/save", - "**/cmd/manager/index/index", - "**/internal/core/algorithm/ngt/assets/index", - "**/internal/test/data/agent/ngt/validIndex" - ] + "ignoreWordsMap": { + ".all-contributorsrc": [ + "Funakoshi", + "Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", + "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "dotdc", + "hrichiksite", + "junsei", + "kevindiu", + "liusy", + "pgrimaud", + "taisuou", + "takuyaymd", + "thedrow", + "ykadowak", + "zchee" + ], + ".commit_template": ["bento", "tada"], + ".devcontainer/devcontainer.json": [ + "PTRACE", + "commandhistory", + "seccomp", + "zshhistory" + ], + ".devcontainer/postAttachCommand.sh": ["commandhistory"], + ".fossa.yml": ["vdctl"], + ".git/hooks/applypatch-msg.sample": ["commitmsg"], + ".git/hooks/fsmonitor-watchman.sample": [ + "CHLD", + "binmode", + "clockid", + "msys" + ], + ".git/hooks/pre-commit.sample": ["allownonascii"], + ".git/hooks/pre-rebase.sample": ["Hamano", "Junio", "oneline"], + ".git/hooks/pre-receive.sample": ["echoback"], + 
".git/hooks/push-to-checkout.sample": ["behaviour"], + ".git/hooks/sendemail-validate.sample": [ + "SENDEMAIL", + "Worktree", + "sendemail", + "worktree" + ], + ".git/hooks/update.sample": [ + "allowdeletetag", + "allowmodifytag", + "allowunannotated", + "newrev", + "oldrev", + "projectdesc" + ], + ".gitattributes": ["contributorsrc"], + ".gitfiles": [ + "CMYK", + "PROTOBUF", + "SHOGO", + "SOFTPROPS", + "TECHNOTE", + "accesslog", + "agentd", + "benchmarkd", + "brandguidelines", + "clusterrolebinding", + "conflint", + "contributorsrc", + "conv", + "darkgray", + "getstarted", + "gitfiles", + "gongt", + "helmignore", + "kosarak", + "kustomization", + "lycorp", + "multicluster", + "networkpolicy", + "nytimes", + "priorityclass", + "promtail", + "serviceaccount", + "sidecard", + "testdata", + "textlintrc", + "tmpl", + "valdmirrortarget", + "vdctl", + "whitesource" + ], + ".github/actions/docker-build/action.yaml": ["opencontainers"], + ".github/actions/notify-slack/action.yaml": ["technote"], + ".github/chatops_commands.md": ["bento"], + ".github/chatops_permissions.yaml": ["datelier", "kevindiu"], + ".github/conflint.yaml": ["kubeval"], + ".github/dependabot.yml": ["gomod"], + ".github/helm/values/values-agent-sidecar.yaml": ["ACCESSKEY", "SECRETKEY"], + ".github/helm/values/values-chaos.yaml": ["serversscheme"], + ".github/helm/values/values-profile.yaml": ["GOMAXPROCS"], + ".github/helm/values/values-readreplica.yaml": ["snapclass"], + ".github/workflows/_docker-image-scan.yaml": [ + "imagename", + "opencontainers" + ], + ".github/workflows/_docker-image.yaml": [ + "DOCKERHUB", + "buildkitd", + "stargz" + ], + ".github/workflows/backport.yml": ["startswith"], + ".github/workflows/build-binaries.yml": ["shogo"], + ".github/workflows/chatops.yml": ["gentest"], + ".github/workflows/detect-internal-config-changes.yml": [ + "INTCFG", + "interal" + ], + ".github/workflows/dockers-gateway-mirror-image.yaml": ["nirror"], + ".github/workflows/e2e-chaos.yaml": ["clusterwide"], + ".github/workflows/e2e-max-dim.yml": ["readlink"], + ".github/workflows/e2e-profiling.yml": ["threadcreate"], + ".github/workflows/fossa.yml": ["urllib"], + ".github/workflows/labeler.yml": ["shortstat"], + ".github/workflows/release.yml": ["goproxy", "softprops"], + ".github/workflows/reviewdog-hadolint.yml": ["Dockerfiles"], + ".github/workflows/reviewdog-k8s.yml": ["CONFLINT", "conflint", "kubeval"], + ".github/workflows/reviewdog-markdown.yml": ["testlint"], + ".github/workflows/test-hack.yml": ["notests", "smallscreen"], + ".github/workflows/unit-test.yaml": ["notests", "smallscreen"], + ".gitignore": ["MSVC", "dylib", "nvim", "nvimlog", "rustc", "rustfmt"], + ".golangci.yml": [ + "asasalint", + "asciicheck", + "bidichk", + "bodyclose", + "contextcheck", + "copylocks", + "cyclop", + "decorder", + "depguard", + "dupl", + "dupword", + "durationcheck", + "errcheck", + "errchkjson", + "errname", + "errorlint", + "execinquery", + "exhaustruct", + "exportloopref", + "forbidigo", + "forcetypeassert", + "ginkgolinter", + "gocheckcompilerdirectives", + "gochecknoglobals", + "gochecknoinits", + "gocognit", + "goconst", + "gocritic", + "gocyclo", + "godox", + "gofmt", + "goheader", + "gomoddirectives", + "gomodguard", + "goprintffuncname", + "gosec", + "gosimple", + "gosmopolitan", + "govet", + "importas", + "ineffassign", + "interfacebloat", + "ireturn", + "loggercheck", + "logrlint", + "makezero", + "megacheck", + "musttag", + "nakedret", + "nestif", + "nilnil", + "nlreturn", + "nolintlint", + "nonamedreturns", + 
"nosprintfhostport", + "paralleltest", + "prealloc", + "predeclared", + "promlinter", + "rowserrcheck", + "sqlclosecheck", + "staticcheck", + "stylecheck", + "testableexamples", + "testpackage", + "thelper", + "tparallel", + "unconvert", + "unparam", + "usestdlibvars", + "vetshadow", + "wastedassign", + "wrapcheck", + "wslissues" + ], + ".prh.yaml": [ + "Burstable", + "Flamegraph", + "besteffort", + "burstable", + "documentaion", + "flamegraph", + "valdcli" + ], + ".textlintrc": [ + "idrequest", + "mevie", + "rerank", + "sptag", + "subtest", + "vektor" + ], + "CHANGELOG.md": [ + "CFLAGS", + "CXXFLAGS", + "Cellebration", + "Dockerfiles", + "Metas", + "Migratation", + "OSDK", + "Stackdriver", + "Tutotial", + "alogrithm", + "ando", + "apiversion", + "bento", + "bidi", + "bulkinsert", + "cass", + "cheking", + "continous", + "conv", + "createindex", + "deeepsource", + "depentency", + "devcontiner", + "dotdc", + "errorgroup", + "exection", + "exhaustruct", + "exsiting", + "gache's", + "gorountine", + "hrichiksite", + "informations", + "iocopy", + "junsei", + "libquadmath", + "lincense", + "liusy", + "makr", + "malloc", + "minnum", + "multiplatforms", + "nvimlog", + "osdk", + "pacakge", + "pacicked", + "pbdocs", + "performace", + "priorityclasses", + "savedmodel", + "slowloris", + "sptag", + "stackdriver", + "tada", + "takuyaymd", + "tensorlfow", + "tset", + "unkeyed", + "unneccessary", + "valdcli", + "vcache", + "vqueue's", + "workdir", + "yamls", + "ykadowak", + "zchee" + ], + "CONTRIBUTING.md": ["Firstname", "Lastname", "implmentes", "newfeature"], + "Makefile": [ + "BLAS", + "CRORG", + "DBLA", + "DBUILD", + "DCMAKE", + "DFAISS", + "EXTLDFLAGS", + "GHCRORG", + "GOCACHE", + "GOPROXY", + "MAKELISTS", + "NPROCESSORS", + "NUMPANES", + "ONLN", + "Ofast", + "PBDOCS", + "PBGOS", + "PROTOBUF", + "PROTODIRS", + "PROTOS", + "RLENGTH", + "RSTART", + "STDDEV", + "armv", + "copress", + "crlfmt", + "dockerfiles", + "fmerge", + "fopenmp", + "funroll", + "gitfiles", + "gsub", + "laec", + "lgfortran", + "lhdf", + "libfaiss", + "llapack", + "lopenblas", + "lstdc", + "mtune", + "ncpu", + "nproc", + "pthread", + "relro", + "strictgoimports", + "toplevel" + ], + "Makefile.d/bench.mk": ["benchmem", "cpuprofile", "memprofile", "nvim"], + "Makefile.d/build.mk": ["EXTLDFLAGS", "linkmode", "popd", "pushd"], + "Makefile.d/dependencies.mk": [ + "GOCACHE", + "PROTOBUF", + "modcache", + "testcache" + ], + "Makefile.d/docker.mk": [ + "CRORG", + "GHCRORG", + "buildcache", + "mediatypes", + "npmjs" + ], + "Makefile.d/e2e.mk": ["ECRUD"], + "Makefile.d/functions.mk": [ + "APIV", + "PBGOS", + "buildid", + "extldflags", + "modcacherw", + "netgo", + "osusergo", + "trimpath" + ], + "Makefile.d/helm.mk": ["valdmirrortarget", "xzvf"], + "Makefile.d/k3d.mk": ["loadbalancer", "storageclass"], + "Makefile.d/k8s.mk": [ + "CRORG", + "cainjector", + "jaegertracing", + "operatorusing", + "promtail", + "serrver" + ], + "Makefile.d/kind.mk": ["conntrack", "netfilter"], + "Makefile.d/minikube.mk": ["hostpath", "storageclass"], + "Makefile.d/proto.mk": ["PROTOS", "protobufs"], + "Makefile.d/test.mk": [ + "covermode", + "coverprofile", + "cweill", + "gotesttools", + "mfridman", + "notests", + "showteststatus" + ], + "Makefile.d/tools.mk": [ + "DBUILD", + "DCMAKE", + "DHDF", + "DZLIB", + "busa", + "crlfmt", + "fatih", + "gomodifytags", + "goplay", + "haya", + "honnef", + "josharian", + "libz", + "momotaro", + "mvdan", + "segmentio", + "staticcheck", + "strictgoimports", + "tlsv", + "xzvf" + ], + "README.md": [ + "Codacy", + "Funakoshi", + 
"Grimaud", + "Hiroto", + "Hrichik", + "Kadowaki", + "Kato", + "Katz", + "Kiichiro", + "Koichi", + "Kosuke", + "Mazumder", + "Morimoto", + "Okamura", + "Rintaro", + "Shiraishi", + "Siyuan", + "YUKAWA", + "Yusuke", + "aknishid", + "ando", + "datelier", + "junsei", + "kevindiu", + "liusy", + "lycorp", + "srcset", + "taisuou", + "takuyaymd", + "thedrow", + "zchee" + ], + "apis/docs/v1/docs.md": [ + "Bignum", + "Fixnum", + "STOCKOUT", + "hasn", + "sfixed", + "sint" + ], + "apis/grpc/v1/payload/payload.pb.go": ["wrapperspb"], + "apis/grpc/v1/payload/payload.pb.json.go": ["protojson"], + "apis/grpc/v1/payload/payload_vtproto.pb.go": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "mapkey", + "mapvalue", + "postmsg", + "protohelpers", + "vtmsg", + "vtpb", + "wiretype", + "wrapperspb" + ], + "apis/grpc/v1/rpc/errdetails/error_details.pb.go": [ + "STOCKOUT", + "durationpb" + ], + "apis/grpc/v1/rpc/errdetails/error_details.pb.json.go": ["protojson"], + "apis/grpc/v1/rpc/errdetails/error_details_vtproto.pb.go": [ + "Indexmapkey", + "Indexmapvalue", + "Lenmapkey", + "Lenmapvalue", + "durationpb", + "mapkey", + "mapvalue", + "protohelpers", + "wiretype" + ], + "apis/proto/v1/agent/core/agent.proto": ["createandsave"], + "apis/proto/v1/payload/payload.proto": ["objc"], + "apis/proto/v1/rpc/errdetails/error_details.proto": ["STOCKOUT", "objc"], + "apis/swagger/v1/agent/core/agent.swagger.json": ["createandsave"], + "assets/test/templates/common/fill.tmpl": ["uintptr"], + "assets/test/templates/common/function.tmpl": ["Subtests"], + "assets/test/templates/option/function.tmpl": ["Subtests"], + "buf.gen.yaml": ["mfridman", "neoeinstein", "openapiv", "pseudomuto"], + "charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml": ["vbjs"], + "charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ], + "charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml": ["vbss"], + "charts/vald-benchmark-operator/templates/clusterrole.yaml": [ + "deletecollection" + ], + "charts/vald-helm-operator/README.md": ["readyz"], + "charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml": ["vhors"], + "charts/vald-helm-operator/templates/clusterrole.yaml": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ], + "charts/vald-helm-operator/values.yaml": ["readyz"], + "charts/vald-readreplica/Chart.yaml": ["ykadowak"], + "charts/vald-readreplica/templates/deployment.yaml": [ + "valdchart", + "valdname" + ], + "charts/vald-readreplica/templates/hpa.yaml": ["valdchart", "valdname"], + "charts/vald-readreplica/templates/svc.yaml": ["valdchart", "valdname"], + "charts/vald/README.md": ["goroutines"], + "charts/vald/crds/valdmirrortarget.yaml": [ + "valdmirrortarget", + "valdmirrortargets", + "vmts" + ], + "charts/vald/templates/_helpers.tpl": ["envkey", "rsslimit", "vszlimit"], + "charts/vald/templates/gateway/mirror/clusterrole.yaml": [ + "valdmirrortargets" + ], + "charts/vald/templates/index/job/readreplica/rotate/clusterrole.yaml": [ + "persistentvolumeclaims" + ], + "charts/vald/templates/index/job/readreplica/rotate/configmap.yaml": [ + "envkey" + ], + "charts/vald/values.schema.json": ["goroutines"], + "charts/vald/values.yaml": ["goroutines"], + "cmd/tools/cli/benchmark/core/main.go": [ + "pfile", + 
"vmdata", + "vmexe", + "vmlib", + "vmlock", + "vmpin", + "vmpte", + "vmstack", + "vmswap" + ], + "dockers/binfmt/Dockerfile": ["tonistiigi"], + "dockers/ci/base/Dockerfile": ["graphviz"], + "dockers/ci/base/README.md": ["titile"], + "dockers/dev/Dockerfile": ["gomodifytags", "graphviz", "staticcheck"], + "docs/api/build_proto.md": ["chrono", "nanos", "protos", "rustc"], + "docs/contributing/coding-style.md": [ + "Roundtripper", + "Structs", + "crlfmt", + "httputil", + "ioutil", + "structs", + "subtests" + ], + "docs/overview/about-vald.md": ["rebalancing", "rerank"], + "docs/overview/component/agent.md": ["verctors"], + "docs/overview/component/discoverer.md": ["nodeby"], + "docs/performance/continuous-benchmark.md": ["vbor"], + "docs/performance/loadtest.md": ["GOMAXPROCS", "maxprocs", "streaminsert"], + "docs/tutorial/get-started-with-faiss-agent.md": ["cvspq", "jrnlw"], + "docs/tutorial/get-started.md": [ + "cvspq", + "getstarted", + "jrnlw", + "loadbalancer" + ], + "docs/tutorial/vald-agent-standalone-on-docker.md": [ + "GOMAXPROCS", + "maxprocs" + ], + "docs/tutorial/vald-multicluster-on-k8s.md": [ + "brbsp", + "dnxbb", + "ghlpx", + "gzcr", + "hbklj", + "kgrdf", + "multicluster", + "vjbqx", + "vlmpg", + "wtlcv", + "xmws" + ], + "docs/usecase/usage-example.md": ["vectorizing"], + "docs/user-guides/backup-configuration.md": ["ACCESSS"], + "docs/user-guides/capacity-planning.md": ["Burstable"], + "docs/user-guides/client-api-config.md": ["Milli", "achive", "rerank"], + "docs/user-guides/cluster-role-binding.md": [ + "clusterrolebinding", + "finalizers", + "retrive", + "valdmirrortargets" + ], + "docs/user-guides/deployment.md": ["finalizers", "valdhelmopratorreleases"], + "example/helm/values-standalone-agent-ngt.yaml": ["mnist's"], + "example/helm/values-with-pyroscope.yaml": ["serversscheme"], + "example/helm/values.yaml": ["Agnet", "mnist's", "serversscheme"], + "example/manifest/scylla/configmap.yaml": ["initdb"], + "example/manifest/scylla/job.yaml": ["cqlsh", "initdb"], + "go.mod": [ + "adal", + "afero", + "ajstarks", + "amqp", + "ansiterm", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "campoy", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + "configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + "dnaeon", + "easyjson", + "embedmd", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "felixge", + "fgprof", + "filippo", + "firestore", + "flowrate", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + "gojsonreference", + "gojsonschema", + "gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "httpsnoop", + "iancoleman", + "ianlancetaylor", + "imdario", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kyaml", + "kylelemons", + "leaktest", + 
"leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mergo", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otelhttp", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "ratelimit", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ], + "hack/benchmark/assets/x1b/loader.go": ["fname"], + "hack/benchmark/assets/x1b/loader_test.go": ["fname"], + "hack/benchmark/internal/db/nosql/cassandra/cassandra_test.go": ["metas"], + "hack/benchmark/internal/db/nosql/cassandra/testdata.json": [ + "Adipisicing", + "Aliqua", + "Aliquip", + "Amet", + "Aute", + "Cillum", + "Commodo", + "Consequat", + "Cupidatat", + "Deserunt", + "Dolore", + "Duis", + "Eiusmod", + "Elit", + "Enim", + "Excepteur", + "Fugiat", + "Incididunt", + "Irure", + "Labore", + "Laboris", + "Laborum", + "Mollit", + "Nostrud", + "Nulla", + "Occaecat", + "Officia", + "Pariatur", + "Proident", + "Quis", + "Reprehenderit", + "Sint", + "Sunt", + "Tempor", + "Ullamco", + "Velit", + "Veniam", + "Voluptate", + "adipisicing", + "aliqua", + "aliquip", + "amet", + "aute", + "cillum", + "commodo", + "consequat", + "cupidatat", + "deserunt", + "dolore", + "duis", + "eiusmod", + "elit", + "enim", + "excepteur", + "fugiat", + "incididunt", + "irure", + "labore", + "laboris", + "laborum", + "mollit", + "nostrud", + "nulla", + "occaecat", + "officia", + "pariatur", + "proident", + "quis", + "reprehenderit", + "sint", + "sunt", + "tempor", + "ullamco", + "velit", + "veniam", + "voluptate" + ], + "hack/benchmark/internal/starter/agent/core/ngt/option.go": [ + "dtype", + "otype" + ], + "hack/benchmark/src/singleflight/singleflight_bench_test.go": [ + "durs", + "resultsmap", + "singlefligh", + "stdsingleflight" + ], + "hack/docker/gen/main.go": [ + "Inernal", + "TARGETARCH", + "TARGETOS", + "WORKDIR", + "Workdir", + "epkg", + "gomodifytags", + "graphviz", + "tmpl", + "tonistiigi" + ], + "hack/go.mod.default": [ + "adal", + "afero", + "ajstarks", + "amqp", + "antihax", + "appengine", + "armon", + "astcopy", + "astequal", + "autorest", + "azcore", + "azidentity", + "benbjohnson", + "beorn", + "blackfriday", + "bmizerany", + "boombuler", + "buger", + "bytefmt", + "cenkalti", + "cespare", + "chunkreader", + "chzyer", + "cloudfoundry", + "cloudsql", + "cmdflag", + "colorstring", + "configsources", + "cpuguy", + "cpuid", + "creack", + "davecgh", + "dbus", + "dejavu", + "demangle", + "denisenkom", + "devigned", + "dgryski", + "difflib", + "diskv", + "dnaeon", + "easyjson", + "emicklei", + "errcheck", + "evanphx", + "eventstream", + "fastuuid", + "firestore", + "fogleman", + "fortytw", + "fpdf", + "frankban", + "freetype", + "glfw", + "goautoneg", + "gobwas", + "godbus", + "godebug", + "godeltaprof", + "gofpdf", + "gofpdi", + "gofrs", + "gofuzz", + "gogrep", + "gojsonpointer", + 
"gojsonreference", + "gojsonschema", + "gomega", + "gomodules", + "gonic", + "gostub", + "gotool", + "gover", + "gregjones", + "groupcache", + "grpcreplay", + "hailocab", + "hanwen", + "honnef", + "httpcache", + "httpfs", + "httphead", + "httpreplay", + "iancoleman", + "ianlancetaylor", + "imds", + "inconshreveable", + "isatty", + "jackc", + "jessevdk", + "jmespath", + "jmoiron", + "joho", + "josharian", + "jsonparser", + "jsonpointer", + "jsonreference", + "jstemmer", + "kisielk", + "kylelemons", + "leaktest", + "leodido", + "liggitt", + "logex", + "logfmt", + "logr", + "lucasb", + "mailru", + "mattn", + "matttproud", + "mitchellh", + "modocache", + "monochromegane", + "montanaflynn", + "mountinfo", + "mssqldb", + "munnerz", + "nhooyr", + "niemeyer", + "nxadm", + "objx", + "ocsql", + "onsi", + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "peterbourgon", + "pflag", + "pgio", + "pgmock", + "pgpassfile", + "pgproto", + "pgservicefile", + "pgtype", + "phpdave", + "pierrec", + "pmezard", + "prashantv", + "progressbar", + "quicktest", + "replayers", + "rogpeppe", + "russross", + "ruudk", + "sbinet", + "schollz", + "secretmanager", + "sergi", + "shlex", + "shopspring", + "shurcoo", + "sirupsen", + "spdystream", + "sqlexp", + "sqlmock", + "sqlx", + "starlark", + "stdinfo", + "stdr", + "stix", + "stoewer", + "strcase", + "strparse", + "tabwriter", + "toolsmith", + "treeprint", + "typeparams", + "udpa", + "ugorji", + "vfsgen", + "xeipuuv", + "xlab", + "xxhash", + "zapr" + ], + "hack/gorules/rules_test.go": ["analysistest"], + "hack/gorules/testdata/tests.go": ["Fmts", "newname"], + "hack/license/gen/main.go": [ + "Pipefile", + "contributorsrc", + "dirwalk", + "gitmodules", + "helmignore", + "tmpl", + "webp", + "whitesource" + ], + "hack/tools/metrics/main.go": ["lucasb", "vgsvg"], + "internal/backoff/backoff_test.go": ["timelimit"], + "internal/circuitbreaker/breaker_test.go": ["resetted"], + "internal/client/v1/client/client.go": ["Upsertor"], + "internal/client/v1/client/discoverer/discover_test.go": [ + "copylocks", + "govet" + ], + "internal/compress/gzip_option_test.go": ["zdtd"], + "internal/compress/lz4/lz4.go": ["pierrec"], + "internal/compress/zstd_option_test.go": ["zdtd"], + "internal/compress/zstd_test.go": ["decom", "vecotr"], + "internal/config/backup_test.go": ["healthcheck"], + "internal/config/blob.go": ["storaget"], + "internal/config/blob_test.go": ["CLOUDSTORAGECONFIG"], + "internal/config/cassandra.go": ["TLSCA"], + "internal/config/cassandra_test.go": ["localserial"], + "internal/config/compress_test.go": [ + "COMPRESSCORE", + "COMPRESSORREGISTERER" + ], + "internal/config/config.go": ["dnum", "rdst", "snum", "vdst"], + "internal/config/config_test.go": [ + "GETACTUALVALUE", + "GETACTUALVALUES", + "GLOBALCONFIG", + "fname" + ], + "internal/config/faiss.go": ["Voronoi", "subquantizers", "subvector"], + "internal/config/filter_test.go": ["sufix"], + "internal/config/gateway_test.go": ["bmanager", "efilter", "ireplica"], + "internal/config/grpc.go": ["Dail"], + "internal/config/grpc_test.go": [ + "DIALOPTION", + "GRPCCLIENT", + "GRPCCLIENTKEEPALIVE", + "healthcheck" + ], + "internal/config/observability_test.go": ["servicename"], + "internal/config/redis_test.go": ["Timelimit"], + "internal/config/server_test.go": ["GPRC", "GRPCKEEPALIVE"], + "internal/config/sidecar_test.go": ["AGENTSIDECAR"], + "internal/conv/conv.go": ["Atobs"], + "internal/core/algorithm/faiss/Capi.cpp": ["IVFPQ", "xids"], + "internal/core/algorithm/faiss/Capi.h": ["xids"], + 
"internal/core/algorithm/faiss/faiss.go": [ + "lfaiss", + "ntotal", + "strage", + "xids" + ], + "internal/core/algorithm/faiss/option.go": ["lfaiss"], + "internal/core/algorithm/ngt/Makefile": ["benchmem"], + "internal/core/algorithm/ngt/ngt.go": [ + "bulkinsert", + "bulkremove", + "cstats", + "lngt", + "ospace", + "stdlib" + ], + "internal/core/algorithm/ngt/ngt_test.go": ["bulkinsert", "ospace"], + "internal/core/algorithm/ngt/option.go": [ + "dotp", + "dproduct", + "halffloat", + "innerp", + "iproduct", + "lngt", + "nang", + "nangle", + "ncos", + "ncosine", + "normalizedang", + "normalizedcos", + "sparsejac" + ], + "internal/core/algorithm/ngt/option_test.go": ["nang", "ncos"], + "internal/db/kvs/bbolt/bbolt_test.go": ["testfunc"], + "internal/db/kvs/bbolt/option.go": ["Freelist"], + "internal/db/kvs/bbolt/option_test.go": ["Freelist"], + "internal/db/kvs/pogreb/options.go": ["deafult"], + "internal/db/kvs/pogreb/pogreb.go": ["deafult"], + "internal/db/kvs/redis/delete.go": ["Deleter"], + "internal/db/kvs/redis/hook.go": ["Cmder"], + "internal/db/kvs/redis/option_test.go": ["defult"], + "internal/db/kvs/redis/redis.go": ["Deleter", "Pipeliner"], + "internal/db/kvs/redis/redis_mock.go": ["Cmder", "Pipeliner"], + "internal/db/kvs/redis/redis_test.go": ["cslots", "gotc"], + "internal/db/nosql/cassandra/cassandra.go": [ + "Queryx", + "cmps", + "configuation", + "wlhf" + ], + "internal/db/nosql/cassandra/cassandra_test.go": [ + "Debouncer", + "Queryx", + "cmps", + "dchf", + "selete", + "unavilable", + "wlhf" + ], + "internal/db/nosql/cassandra/delete.go": ["Deleter"], + "internal/db/nosql/cassandra/option.go": [ + "TLSCA", + "eachquorum", + "localone", + "localquorum", + "localserial" + ], + "internal/db/nosql/cassandra/option_test.go": ["TLSCA"], + "internal/db/rdb/mysql/mysql_test.go": ["insertbysql", "loadcontext"], + "internal/db/rdb/mysql/option_test.go": ["valddb", "valdmysql"], + "internal/db/storage/blob/cloudstorage/cloudstorage.go": ["iblob"], + "internal/db/storage/blob/cloudstorage/cloudstorage_test.go": ["iblob"], + "internal/db/storage/blob/cloudstorage/option.go": ["urlstr"], + "internal/db/storage/blob/s3/reader/reader_test.go": ["roop"], + "internal/db/storage/blob/s3/sdk/s3/s3manager/s3manager.go": ["mngr"], + "internal/db/storage/blob/s3/session/session_test.go": [ + "btop", + "forcepathstyle", + "httpclient", + "itop", + "maxretries" + ], + "internal/errors/benchmark.go": [ + "benchjob", + "benchscenario", + "tbenchjob", + "tbenchscenario" + ], + "internal/errors/cassandra.go": ["consistetncy", "tcql"], + "internal/errors/cassandra_test.go": ["consistetncy", "tcql"], + "internal/errors/circuitbreaker.go": ["errstr"], + "internal/errors/compressor.go": ["registerers"], + "internal/errors/compressor_test.go": ["leve", "registerers"], + "internal/errors/errors_test.go": ["Unwarp", "uncomparable", "unwrapd"], + "internal/errors/file.go": ["fitos"], + "internal/errors/file_test.go": ["fitos"], + "internal/errors/lb.go": ["Insuffcient"], + "internal/errors/mysql_test.go": ["vaef"], + "internal/errors/redis.go": ["KVVK"], + "internal/errors/redis_test.go": ["KVVK"], + "internal/errors/vald_test.go": ["tvald"], + "internal/file/file_test.go": ["utiltest"], + "internal/info/info.go": ["procs", "strs"], + "internal/k8s/client/client.go": [ + "applyconfigurations", + "applycorev", + "clientgoscheme", + "snapshotv", + "volumesnapshot" + ], + "internal/k8s/job/job.go": ["batchv"], + "internal/k8s/option.go": ["mertics"], + "internal/k8s/reconciler.go": ["mertics", "mserver"], + 
"internal/k8s/reconciler_test.go": ["mertics"], + "internal/k8s/types.go": ["appsv", "batchv", "snapshotv", "volumesnapshot"], + "internal/k8s/vald/benchmark/api/v1/job_types.go": ["deepcopy"], + "internal/k8s/vald/benchmark/api/v1/scenario_types.go": ["deepcopy"], + "internal/k8s/vald/mirror/api/v1/target_types.go": ["deepcopy"], + "internal/log/glg/glg.go": ["DEBG", "dstr"], + "internal/log/glg/glg_test.go": ["DEBG"], + "internal/log/level/level.go": ["DEBG", "ERRO", "FATA"], + "internal/log/logger/iface.go": ["finalizer"], + "internal/log/logger/type.go": ["Atot"], + "internal/log/logger/type_test.go": ["Atot", "ZEROL"], + "internal/log/nop/nop.go": ["finalizer"], + "internal/log/option.go": ["Atot"], + "internal/log/retry/retry_test.go": ["foramt", "gotr", "wantr"], + "internal/net/control/control.go": ["boolint"], + "internal/net/control/control_test.go": ["boolint"], + "internal/net/control/control_unix.go": ["uapi"], + "internal/net/dialer.go": ["nport", "tconnectionstate", "tder"], + "internal/net/dialer_test.go": ["Nums", "copylocks", "govet"], + "internal/net/grpc/client.go": ["gbackoff", "parseable", "rebalancing"], + "internal/net/grpc/client_test.go": ["gbackoff"], + "internal/net/grpc/errdetails/errdetails.go": ["iobjs"], + "internal/net/grpc/logger/logger.go": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ], + "internal/net/grpc/logger/logger_test.go": [ + "Errorln", + "Infoln", + "Warningf", + "Warningln", + "grpclog" + ], + "internal/net/grpc/option.go": [ + "gbackoff", + "metricinterceptor", + "traceinterceptor" + ], + "internal/net/grpc/pool/pool.go": ["tdelay"], + "internal/net/grpc/proto/proto.go": ["protoiface"], + "internal/net/grpc/server_test.go": ["channelz"], + "internal/net/http/client/option.go": ["Alives", "Keepalives"], + "internal/net/http/client/option_test.go": ["Alives", "Keepalives"], + "internal/net/http/dump/dump_test.go": ["hoge"], + "internal/net/http/metrics/pprof.go": [ + "felixge", + "fgprof", + "godeltaprof", + "pyprof", + "threadcreate" + ], + "internal/net/http/middleware/timeout_test.go": ["extermemly"], + "internal/net/http/transport/roundtrip.go": ["roundtripper"], + "internal/net/net.go": ["hostport"], + "internal/net/net_test.go": ["hostport"], + "internal/observability/exporter/otlp/otlp.go": [ + "otlpmetric", + "otlpmetricgrpc", + "otlptracegrpc", + "semconv" + ], + "internal/observability/metrics/grpc/grpc.go": ["Desctiption"], + "internal/observability/metrics/mem/index/index.go": ["mstats"], + "internal/observability/metrics/mem/mem.go": [ + "Memstats", + "Shmem", + "buckhash", + "mcache", + "mspan", + "oinsts", + "shmem", + "toal", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ], + "internal/observability/metrics/mem/mem_test.go": ["Memstats"], + "internal/observability/trace/status.go": ["RPCGRPC", "ocodes", "semconv"], + "internal/params/params.go": ["commnad"], + "internal/runner/runner.go": ["maxprocs", "mfunc", "timelocation"], + "internal/safety/safety.go": ["revcover", "runtimer"], + "internal/servers/option.go": ["strg"], + "internal/servers/option_test.go": ["gsrv", "strg"], + "internal/servers/server/option.go": [ + "accesslog", + "accessloginterceptor", + "metricinterceptor", + "recoverinterceptor", + "traceinterceptor" + ], + "internal/servers/server/server_test.go": ["prestart"], + "internal/servers/servers_test.go": ["strg"], + "internal/strings/strings_benchmark_test.go": ["tstr"], + "internal/sync/errgroup/group_test.go": ["acquireings", "goroutne"], 
+ "internal/sync/semaphore/semaphore.go": ["cancelation"], + "internal/sync/semaphore/semaphore_example_test.go": [ + "Collatz", + "collatz", + "nonpositive" + ], + "internal/sync/semaphore/semaphore_test.go": ["Doesnt", "unacquired"], + "internal/sync/singleflight/singleflight.go": ["chans", "dups"], + "internal/sync/singleflight/singleflight_test.go": ["DOCHAN", "unparam"], + "internal/test/data/hdf5/hdf5.go": ["Keepalives", "Neighors"], + "internal/test/data/hdf5/option.go": ["dataname"], + "internal/test/data/vector/gen.go": ["irand"], + "internal/test/mock/grpc_testify_mock.go": ["losm", "usecases"], + "internal/test/mock/k8s/client.go": ["crclient"], + "internal/timeutil/rate/rate.go": ["ratelimit"], + "internal/timeutil/rate/rate_test.go": ["ratelimit"], + "internal/timeutil/time_test.go": ["dummystring", "hoge"], + "internal/unit/unit.go": ["bytefmt", "cloudfoundry"], + "internal/version/version.go": ["curv"], + "internal/worker/worker_test.go": ["testname"], + "k8s/external/minio/deployment.yaml": ["ACCESSKEY", "SECRETKEY"], + "k8s/external/minio/mb-job.yaml": ["ACCESSKEY", "SECRETKEY"], + "k8s/metrics/grafana/dashboards/00-vald-cluster-overview.yaml": [ + "Misscheduled", + "Qxya", + "misscheduled" + ], + "k8s/metrics/grafana/dashboards/02-vald-discoverer.yaml": [ + "Jkemc", + "Versin" + ], + "k8s/metrics/grafana/dashboards/05-vald-index-manager.yaml": ["jowe"], + "k8s/metrics/grafana/dashboards/10-vald-benchmark-operator.yaml": [ + "Versin", + "fdewjfx", + "jkxz" + ], + "k8s/metrics/grafana/dashboards/99-vald-agent-memory.yaml": [ + "Memstats", + "buckhash", + "mcache", + "mspan", + "vmdata", + "vmexe", + "vmlck", + "vmlib", + "vmpin", + "vmpte", + "vmstk", + "vmswap" + ], + "k8s/metrics/jaeger/jaeger.yaml": ["jaegertracing"], + "k8s/metrics/loki/loki.yaml": [ + "boltdb", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ], + "k8s/metrics/loki/promtail.yaml": [ + "labelmap", + "promtail", + "varlibdockercontainers", + "varlog" + ], + "k8s/metrics/prometheus/configmap.yaml": ["cadvisor", "labelmap"], + "k8s/metrics/pyroscope/README.md": ["mafests"], + "k8s/metrics/pyroscope/base/configmap.yaml": ["labelmap"], + "k8s/metrics/pyroscope/base/daemonset.yaml": ["ebpfspy"], + "k8s/metrics/pyroscope/base/kustomization.yaml": ["clusterrolebinding"], + "k8s/metrics/tempo/jaeger-agent.yaml": ["jaegertracing"], + "k8s/metrics/tempo/tempo.yaml": [ + "blocklist", + "ingester", + "inmemory", + "kvstore", + "lifecycler" + ], + "k8s/operator/helm/clusterrole.yaml": [ + "clusterrolebindings", + "clusterroles", + "customresourcedefinitions", + "horizontalpodautoscalers", + "networkpolicies", + "persistentvolumeclaims", + "poddisruptionbudgets", + "priorityclasses", + "serviceaccounts", + "statefulsets" + ], + "k8s/operator/helm/crds/valdhelmoperatorrelease.yaml": ["vhors"], + "k8s/operator/helm/operator.yaml": ["readyz"], + "k8s/tools/benchmark/job/clusterrolebinding.yaml": ["rolebinding"], + "k8s/tools/benchmark/job/serviceaccount.yaml": ["Versoin"], + "k8s/tools/benchmark/operator/clusterrole.yaml": ["deletecollection"], + "k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml": ["vbjs"], + "k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml": [ + "valdbenchmarkoperator", + "valdbenchmarkoperatorreleases", + "vbor", + "vbors" + ], + "k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml": ["vbss"], + "pkg/agent/core/faiss/handler/grpc/search.go": ["createing"], + "pkg/agent/core/faiss/service/faiss.go": [ + "Voronoi", + "ntotal", + "saveindex", + "subquantizers", 
+ "tpath", + "tvald" + ], + "pkg/agent/core/faiss/service/option.go": ["bdbs", "brnd"], + "pkg/agent/core/faiss/usecase/agentd.go": ["faissmetrics"], + "pkg/agent/core/ngt/handler/grpc/flush.go": ["cnts"], + "pkg/agent/core/ngt/handler/grpc/index_test.go": ["exteneral"], + "pkg/agent/core/ngt/handler/grpc/insert.go": ["vmap"], + "pkg/agent/core/ngt/handler/grpc/insert_test.go": [ + "Testingcase", + "joind", + "nonexistid", + "pushinsert" + ], + "pkg/agent/core/ngt/handler/grpc/object_test.go": ["testfunc", "tmock"], + "pkg/agent/core/ngt/handler/grpc/update.go": ["idis", "vmap"], + "pkg/agent/core/ngt/handler/grpc/update_test.go": ["Testint"], + "pkg/agent/core/ngt/service/ngt.go": [ + "Nopvq", + "nkvs", + "nobic", + "nopvq", + "saveindex", + "toid", + "tvald" + ], + "pkg/agent/core/ngt/service/ngt_test.go": [ + "additionaldigits", + "kvald", + "metafile", + "nobic", + "nopvq", + "testfunc" + ], + "pkg/agent/core/ngt/service/option.go": ["bdbs", "brnd"], + "pkg/agent/core/ngt/usecase/agentd.go": ["memmetrics", "ngtmetrics"], + "pkg/agent/internal/vqueue/queue.go": ["uninserted"], + "pkg/agent/internal/vqueue/stateful_test.go": ["getvector"], + "pkg/agent/sidecar/service/restorer/restorer.go": ["Typeflag"], + "pkg/discoverer/k8s/handler/grpc/handler.go": [ + "ngroup", + "pgroup", + "sgroup" + ], + "pkg/discoverer/k8s/handler/grpc/handler_test.go": [ + "ngroup", + "pgroup", + "sgroup" + ], + "pkg/discoverer/k8s/service/discover.go": [ + "mnode", + "mpod", + "reconciation", + "svcsmap" + ], + "pkg/discoverer/k8s/service/discover_test.go": ["mnode", "mpod"], + "pkg/discoverer/k8s/usecase/discovered.go": ["unbackupped"], + "pkg/gateway/lb/handler/grpc/aggregation.go": [ + "Insuffcient", + "fdist", + "fmax", + "timeoutage" + ], + "pkg/gateway/lb/handler/grpc/handler.go": [ + "cnts", + "indegrees", + "outdegrees" + ], + "pkg/gateway/lb/handler/grpc/handler_test.go": ["Cnts"], + "pkg/gateway/lb/handler/grpc/pairing_heap_test.go": ["gids"], + "pkg/gateway/lb/handler/grpc/search_benchmark_test.go": ["datas"], + "pkg/gateway/mirror/handler/grpc/handler_test.go": ["clientmock", "cmap"], + "pkg/gateway/mirror/service/discovery.go": ["ctgt", "ptgt"], + "pkg/gateway/mirror/service/discovery_option.go": ["datacenter"], + "pkg/gateway/mirror/service/mirror_test.go": ["grpcmock"], + "pkg/gateway/mirror/usecase/vald.go": ["mirrormetrics"], + "pkg/index/job/creation/service/indexer_test.go": [ + "clientmock", + "grpcmock" + ], + "pkg/index/job/save/service/indexer_test.go": ["clientmock", "grpcmock"], + "pkg/manager/index/usecase/indexer.go": ["indexmetrics"], + "pkg/tools/benchmark/job/config/config.go": ["JOBNAME", "JOBNAMESPACE"], + "pkg/tools/benchmark/job/service/insert.go": ["Prometeus"], + "pkg/tools/benchmark/job/service/job.go": ["Neighors", "benchjob"], + "pkg/tools/benchmark/job/service/option.go": ["Concurency", "bjns"], + "pkg/tools/benchmark/job/service/option_test.go": ["Concurency", "bjns"], + "pkg/tools/benchmark/job/usecase/benchmarkd.go": [ + "Concurency", + "gcli", + "unbackupped", + "usecases", + "vcli" + ], + "pkg/tools/benchmark/operator/service/operator.go": [ + "Progation", + "benchjob", + "benchjobs", + "benchmarkjob", + "benchscenario", + "bjob", + "cbjl", + "cbsl", + "cjobs", + "rcticker", + "wating" + ], + "pkg/tools/benchmark/operator/service/operator_test.go": [ + "benchjobs", + "minsit", + "scneario" + ], + "pkg/tools/benchmark/operator/usecase/benchmarkd.go": [ + "benchmarkmetrics", + "unbackupped", + "usecases" + ], + "pkg/tools/cli/loadtest/assets/dataset.go": ["kosarak", 
"nytimes"], + "pkg/tools/cli/loadtest/assets/hdf5_loader.go": ["dset", "npoints"], + "pkg/tools/cli/loadtest/assets/hdf5_loader_test.go": ["dset", "npoints"], + "pkg/tools/cli/loadtest/config/config.go": ["streaminsert"], + "rust/libs/ngt-rs/Cargo.toml": ["miette"], + "rust/libs/ngt-rs/build.rs": [ + "BFLOAT", + "DNGT", + "dylib", + "fopenmp", + "gomp", + "miette", + "rustc" + ], + "rust/libs/ngt-rs/src/input.cpp": ["cpath", "ngtresults", "vquery"], + "rust/libs/ngt-rs/src/lib.rs": ["repr"], + "rust/libs/observability/Cargo.toml": ["reqwest", "scopeguard", "serde"], + "rust/libs/observability/src/macros.rs": ["Updown"], + "rust/libs/proto/src/payload.v1.rs": ["repr"], + "tests/chaos/chart/README.md": ["kbps", "minburst", "peakrate"], + "tests/chaos/chart/templates/network/bandwidth.yaml": [ + "minburst", + "peakrate" + ], + "tests/chaos/chart/values.yaml": ["kbps", "minburst", "peakrate"], + "tests/e2e/crud/crud_test.go": ["ECRUD"], + "tests/e2e/kubernetes/client/client.go": [ + "Clientset", + "clientcmd", + "clientset" + ], + "tests/e2e/kubernetes/kubectl/kubectl.go": ["rollouts", "subcmds"], + "tests/e2e/kubernetes/portforward/portforward.go": [ + "genericclioptions", + "portforwarder", + "spdy", + "upgrader" + ], + "tests/e2e/operation/stream.go": ["evalidator", "svalidator"] + } } diff --git a/.gitattributes b/.gitattributes index 97ad7b12d1..853fd7bd69 100644 --- a/.gitattributes +++ b/.gitattributes @@ -13,6 +13,52 @@ # See the License for the specific language governing permissions and # limitations under the License. # +* text encoding=utf-8 eol=lf -# https://github.com/github/linguist/blob/master/docs/overrides.md -.all-contributorsrc linguist-language=JSON +*.config text encoding=utf-8 eol=lf +*.cpp text encoding=utf-8 eol=lf diff=cpp +*.css text encoding=utf-8 eol=lf +*.csv text encoding=utf-8 eol=lf linguist-language=CSV +*.go text encoding=utf-8 eol=lf diff=go +*.h text encoding=utf-8 eol=lf diff=cpp +*.hpp text encoding=utf-8 eol=lf diff=cpp +*.json text encoding=utf-8 eol=lf diff=json linguist-language=JSON +*.md text encoding=utf-8 eol=lf +*.proto text encoding=utf-8 eol=lf diff=proto +*.py text encoding=utf-8 eol=lf diff=python +*.rs text encoding=utf-8 eol=lf diff=rust +*.txt text encoding=utf-8 eol=lf +*.xml text encoding=utf-8 eol=lf diff=html +*.yaml text encoding=utf-8 eol=lf +*.yml text encoding=utf-8 eol=lf +.all-contributorsrc text encoding=utf-8 eol=lf linguist-language=JSON +Dockerfile text encoding=utf-8 eol=lf +LICENSE text encoding=utf-8 eol=lf + + +*.a binary +*.gz binary +*.jpeg binary +*.o binary +*.pdf binary +*.so binary +*.zip binary +*.jpg binary filter=lfs diff=lfs merge=lfs -text +*.png binary filter=lfs diff=lfs merge=lfs -text +*.ai binary filter=lfs diff=lfs merge=lfs -text +*.psd binary filter=lfs diff=lfs merge=lfs -text + +*.json merge=ours +*.mod merge=ours +*.sum merge=ours +*.svg merge=ours + + +vendor/** linguist-vendored + +*.tmp export-ignore +*.tmp.* export-ignore +*~ export-ignore +.DS_Store export-ignore +.~lock.* export-ignore +Thumbs.db export-ignore diff --git a/.gitfiles b/.gitfiles index 9b3e70cd51..be6e94472e 100644 --- a/.gitfiles +++ b/.gitfiles @@ -46,7 +46,7 @@ .github/chatops_permissions.yaml .github/codeql/codeql-config.yaml .github/conflint.yaml -.github/dependabot.yml +.github/dependabot.yaml .github/helm/values/vald-mirror-target.yaml .github/helm/values/values-agent-sidecar.yaml .github/helm/values/values-chaos.yaml @@ -60,72 +60,74 @@ .github/helm/values/values-readreplica.yaml .github/issue_label_bot.yaml 
.github/kubelinter.yaml -.github/labeler.yml +.github/labeler.yaml .github/valdrelease/valdrelease.yaml -.github/workflows/_detect-ci-container.yml +.github/workflows/_detect-ci-container.yaml .github/workflows/_docker-image-scan.yaml .github/workflows/_docker-image.yaml -.github/workflows/_release-pr.yml +.github/workflows/_release-pr.yaml .github/workflows/_update-protobuf.yaml -.github/workflows/backport.yml -.github/workflows/build-binaries.yml -.github/workflows/build-protobuf.yml -.github/workflows/chatops-help.yml -.github/workflows/chatops.yml -.github/workflows/check-conflict.yml +.github/workflows/backport.yaml +.github/workflows/build-binaries.yaml +.github/workflows/build-protobuf.yaml +.github/workflows/chatops-help.yaml +.github/workflows/chatops.yaml +.github/workflows/check-conflict.yaml .github/workflows/codeql-analysis.yml -.github/workflows/coverage.yml -.github/workflows/detect-internal-config-changes.yml -.github/workflows/dockers-agent-faiss-image.yml -.github/workflows/dockers-agent-image.yml -.github/workflows/dockers-agent-ngt-image.yml -.github/workflows/dockers-agent-sidecar-image.yml -.github/workflows/dockers-benchmark-job-image.yml +.github/workflows/coverage.yaml +.github/workflows/detect-internal-config-changes.yaml +.github/workflows/dockers-agent-faiss-image.yaml +.github/workflows/dockers-agent-image.yaml +.github/workflows/dockers-agent-ngt-image.yaml +.github/workflows/dockers-agent-sidecar-image.yaml +.github/workflows/dockers-benchmark-job-image.yaml .github/workflows/dockers-benchmark-operator-image.yaml .github/workflows/dockers-binfmt-image.yaml -.github/workflows/dockers-buildbase-image.yml +.github/workflows/dockers-buildbase-image.yaml .github/workflows/dockers-buildkit-image.yaml .github/workflows/dockers-buildkit-syft-scanner-image.yaml -.github/workflows/dockers-ci-container-image.yml -.github/workflows/dockers-dev-container-image.yml -.github/workflows/dockers-discoverer-k8s-image.yml -.github/workflows/dockers-gateway-filter-image.yml -.github/workflows/dockers-gateway-lb-image.yml +.github/workflows/dockers-ci-container-image.yaml +.github/workflows/dockers-dev-container-image.yaml +.github/workflows/dockers-discoverer-k8s-image.yaml +.github/workflows/dockers-example-client-image.yaml +.github/workflows/dockers-gateway-filter-image.yaml +.github/workflows/dockers-gateway-lb-image.yaml .github/workflows/dockers-gateway-mirror-image.yaml -.github/workflows/dockers-helm-operator-image.yml -.github/workflows/dockers-image-scan.yml -.github/workflows/dockers-index-correction.yml -.github/workflows/dockers-index-creation.yml -.github/workflows/dockers-index-operator.yml -.github/workflows/dockers-index-save.yml -.github/workflows/dockers-loadtest-image.yml -.github/workflows/dockers-manager-index-image.yml -.github/workflows/dockers-readreplica-rotate.yml +.github/workflows/dockers-helm-operator-image.yaml +.github/workflows/dockers-image-scan.yaml +.github/workflows/dockers-index-correction-image.yaml +.github/workflows/dockers-index-creation-image.yaml +.github/workflows/dockers-index-deletion-image.yaml +.github/workflows/dockers-index-operator-image.yaml +.github/workflows/dockers-index-save-image.yaml +.github/workflows/dockers-loadtest-image.yaml +.github/workflows/dockers-manager-index-image.yaml +.github/workflows/dockers-readreplica-rotate-image.yaml .github/workflows/dockers-release-branch-image.yaml .github/workflows/e2e-chaos.yaml .github/workflows/e2e-code-bench-agent.yaml -.github/workflows/e2e-max-dim.yml 
-.github/workflows/e2e-profiling.yml -.github/workflows/e2e.yml -.github/workflows/format.yml -.github/workflows/fossa.yml -.github/workflows/helm-lint.yml -.github/workflows/helm.yml +.github/workflows/e2e-max-dim.yaml +.github/workflows/e2e-profiling.yaml +.github/workflows/e2e.yaml +.github/workflows/format.yaml +.github/workflows/fossa.yaml +.github/workflows/helm-lint.yaml +.github/workflows/helm.yaml .github/workflows/issue-metrics.yaml -.github/workflows/labeler.yml -.github/workflows/release.yml -.github/workflows/reviewdog-hadolint.yml -.github/workflows/reviewdog-k8s.yml -.github/workflows/reviewdog-markdown.yml -.github/workflows/reviewdog.yml +.github/workflows/labeler.yaml +.github/workflows/release.yaml +.github/workflows/reviewdog-hadolint.yaml +.github/workflows/reviewdog-k8s.yaml +.github/workflows/reviewdog-markdown.yaml +.github/workflows/reviewdog.yaml .github/workflows/semver-major-minor.yaml .github/workflows/semver-patch.yaml -.github/workflows/test-hack.yml +.github/workflows/test-hack.yaml .github/workflows/unit-test.yaml -.github/workflows/update-actions.yaml +.github/workflows/update-deps.yaml .github/workflows/update-protobuf.yaml -.github/workflows/update-pull-request-and-issue-template.yml -.github/workflows/update-web-docs.yml +.github/workflows/update-pull-request-and-issue-template.yaml +.github/workflows/update-web-docs.yaml .gitignore .golangci.yml .prh.yaml @@ -140,7 +142,6 @@ Makefile Makefile.d/actions.mk Makefile.d/bench.mk Makefile.d/build.mk -Makefile.d/client.mk Makefile.d/dependencies.mk Makefile.d/docker.mk Makefile.d/e2e.mk @@ -168,6 +169,8 @@ apis/grpc/v1/filter/egress/egress_filter.pb.go apis/grpc/v1/filter/egress/egress_filter_vtproto.pb.go apis/grpc/v1/filter/ingress/ingress_filter.pb.go apis/grpc/v1/filter/ingress/ingress_filter_vtproto.pb.go +apis/grpc/v1/meta/meta.pb.go +apis/grpc/v1/meta/meta_vtproto.pb.go apis/grpc/v1/mirror/mirror.go apis/grpc/v1/mirror/mirror.pb.go apis/grpc/v1/mirror/mirror_vtproto.pb.go @@ -197,13 +200,12 @@ apis/grpc/v1/vald/update_vtproto.pb.go apis/grpc/v1/vald/upsert.pb.go apis/grpc/v1/vald/upsert_vtproto.pb.go apis/grpc/v1/vald/vald.go -apis/proto/buf.lock -apis/proto/buf.yaml apis/proto/v1/agent/core/agent.proto apis/proto/v1/agent/sidecar/sidecar.proto apis/proto/v1/discoverer/discoverer.proto apis/proto/v1/filter/egress/egress_filter.proto apis/proto/v1/filter/ingress/ingress_filter.proto +apis/proto/v1/meta/meta.proto apis/proto/v1/mirror/mirror.proto apis/proto/v1/payload/payload.proto apis/proto/v1/rpc/errdetails/error_details.proto @@ -221,6 +223,7 @@ apis/swagger/v1/agent/sidecar/sidecar.swagger.json apis/swagger/v1/discoverer/discoverer.swagger.json apis/swagger/v1/filter/egress/egress_filter.swagger.json apis/swagger/v1/filter/ingress/ingress_filter.swagger.json +apis/swagger/v1/meta/meta.swagger.json apis/swagger/v1/mirror/mirror.swagger.json apis/swagger/v1/payload/payload.swagger.json apis/swagger/v1/rpc/errdetails/error_details.swagger.json @@ -300,7 +303,8 @@ assets/test/templates/option/function.tmpl assets/test/templates/option/header.tmpl assets/test/templates/option/results.tmpl buf.gen.yaml -buf.work.yaml +buf.lock +buf.yaml charts/vald-benchmark-operator/Chart.yaml charts/vald-benchmark-operator/README.md charts/vald-benchmark-operator/README.md.gotmpl @@ -486,6 +490,8 @@ cmd/index/job/correction/sample.yaml cmd/index/job/creation/main.go cmd/index/job/creation/main_test.go cmd/index/job/creation/sample.yaml +cmd/index/job/deletion/main.go +cmd/index/job/deletion/sample.yaml 
cmd/index/job/readreplica/rotate/main.go cmd/index/job/readreplica/rotate/main_test.go cmd/index/job/readreplica/rotate/sample.yaml @@ -511,6 +517,7 @@ cmd/tools/cli/loadtest/main_test.go cmd/tools/cli/loadtest/sample.yaml cmd/tools/cli/vdctl/main.go cmd/tools/cli/vdctl/main_test.go +codecov.yaml design/Vald Architecture Assets.drawio design/Vald Architecture Dataflow.drawio design/Vald Architecture Overview.drawio @@ -532,6 +539,7 @@ dockers/dev/Dockerfile dockers/dev/README.md dockers/discoverer/k8s/Dockerfile dockers/discoverer/k8s/README.md +dockers/example/client/Dockerfile dockers/gateway/filter/Dockerfile dockers/gateway/filter/README.md dockers/gateway/lb/Dockerfile @@ -539,6 +547,7 @@ dockers/gateway/lb/README.md dockers/gateway/mirror/Dockerfile dockers/index/job/correction/Dockerfile dockers/index/job/creation/Dockerfile +dockers/index/job/deletion/Dockerfile dockers/index/job/readreplica/rotate/Dockerfile dockers/index/job/save/Dockerfile dockers/index/operator/Dockerfile @@ -698,6 +707,8 @@ hack/benchmark/internal/starter/gateway/vald/vald_test.go hack/benchmark/internal/starter/starter.go hack/benchmark/metrics/metrics.go hack/benchmark/src/singleflight/singleflight_bench_test.go +hack/cspell/main.go +hack/cspell/main_test.go hack/docker/gen/main.go hack/git/hooks/pre-commit hack/go.mod.default @@ -710,6 +721,8 @@ hack/helm/schema/gen/main.go hack/helm/schema/gen/main_test.go hack/license/gen/main.go hack/license/gen/main_test.go +hack/tools/deadlink/index.html +hack/tools/deadlink/main.go hack/tools/kvsdb/main.go hack/tools/metrics/main.go hack/tools/metrics/main_test.go @@ -827,6 +840,7 @@ internal/config/grpc_test.go internal/config/index.go internal/config/index_creation.go internal/config/index_creation_test.go +internal/config/index_deleter.go internal/config/index_operator.go internal/config/index_operator_test.go internal/config/index_save.go @@ -879,6 +893,10 @@ internal/core/algorithm/ngt/ngt_bench_test.go internal/core/algorithm/ngt/ngt_test.go internal/core/algorithm/ngt/option.go internal/core/algorithm/ngt/option_test.go +internal/core/algorithm/usearch/option.go +internal/core/algorithm/usearch/option_test.go +internal/core/algorithm/usearch/usearch.go +internal/core/algorithm/usearch/usearch_test.go internal/db/kvs/bbolt/bbolt.go internal/db/kvs/bbolt/bbolt_test.go internal/db/kvs/bbolt/option.go @@ -1021,6 +1039,8 @@ internal/errors/storage.go internal/errors/tls.go internal/errors/unit.go internal/errors/unit_test.go +internal/errors/usearch.go +internal/errors/usearch_test.go internal/errors/vald.go internal/errors/vald_test.go internal/errors/vqueue.go @@ -1150,6 +1170,7 @@ internal/net/grpc/client_test.go internal/net/grpc/codec.go internal/net/grpc/codec_test.go internal/net/grpc/codes/codes.go +internal/net/grpc/codes/codes_test.go internal/net/grpc/context.go internal/net/grpc/context_test.go internal/net/grpc/credentials/credentials.go @@ -1227,6 +1248,8 @@ internal/net/net.go internal/net/net_test.go internal/net/option.go internal/net/option_test.go +internal/net/quic/conn.go +internal/net/quic/listener.go internal/observability/attribute/attribute.go internal/observability/attribute/attribute_test.go internal/observability/exporter/exporter.go @@ -1277,6 +1300,8 @@ internal/observability/trace/status_test.go internal/observability/trace/trace.go internal/observability/trace/trace_option.go internal/observability/trace/trace_test.go +internal/os/hostname.go +internal/os/hostname_test.go internal/params/option.go internal/params/option_test.go 
internal/params/params.go @@ -1422,6 +1447,15 @@ k8s/gateway/gateway/lb/hpa.yaml k8s/gateway/gateway/lb/pdb.yaml k8s/gateway/gateway/lb/priorityclass.yaml k8s/gateway/gateway/lb/svc.yaml +k8s/gateway/gateway/mirror/clusterrole.yaml +k8s/gateway/gateway/mirror/clusterrolebinding.yaml +k8s/gateway/gateway/mirror/configmap.yaml +k8s/gateway/gateway/mirror/deployment.yaml +k8s/gateway/gateway/mirror/hpa.yaml +k8s/gateway/gateway/mirror/pdb.yaml +k8s/gateway/gateway/mirror/priorityclass.yaml +k8s/gateway/gateway/mirror/serviceaccount.yaml +k8s/gateway/gateway/mirror/svc.yaml k8s/index/job/correction/configmap.yaml k8s/index/job/correction/cronjob.yaml k8s/index/job/creation/configmap.yaml @@ -1594,6 +1628,8 @@ pkg/agent/internal/kvs/kvs.go pkg/agent/internal/kvs/kvs_test.go pkg/agent/internal/kvs/option.go pkg/agent/internal/kvs/option_test.go +pkg/agent/internal/memstore/data_manager.go +pkg/agent/internal/memstore/data_manager_test.go pkg/agent/internal/metadata/metadata.go pkg/agent/internal/metadata/metadata_test.go pkg/agent/internal/vqueue/option.go @@ -1680,7 +1716,6 @@ pkg/gateway/filter/router/router.go pkg/gateway/filter/router/router_test.go pkg/gateway/filter/usecase/vald.go pkg/gateway/filter/usecase/vald_test.go -pkg/gateway/internal/location/location_test.go pkg/gateway/lb/README.md pkg/gateway/lb/config/config.go pkg/gateway/lb/config/config_test.go @@ -1759,6 +1794,10 @@ pkg/index/job/creation/service/options.go pkg/index/job/creation/service/options_test.go pkg/index/job/creation/usecase/creation.go pkg/index/job/creation/usecase/creation_test.go +pkg/index/job/deletion/config/config.go +pkg/index/job/deletion/service/deleter.go +pkg/index/job/deletion/service/options.go +pkg/index/job/deletion/usecase/deletion.go pkg/index/job/readreplica/rotate/config/config.go pkg/index/job/readreplica/rotate/config/config_test.go pkg/index/job/readreplica/rotate/service/options.go @@ -1897,19 +1936,31 @@ rust/bin/agent/src/handler/search.rs rust/bin/agent/src/handler/update.rs rust/bin/agent/src/handler/upsert.rs rust/bin/agent/src/main.rs -rust/libs/ngt-rs/Cargo.toml -rust/libs/ngt-rs/build.rs -rust/libs/ngt-rs/src/input.cpp -rust/libs/ngt-rs/src/input.h -rust/libs/ngt-rs/src/lib.rs -rust/libs/ngt/Cargo.toml -rust/libs/ngt/src/lib.rs +rust/bin/meta/Cargo.toml +rust/bin/meta/src/handler.rs +rust/bin/meta/src/handler/meta.rs +rust/bin/meta/src/main.rs +rust/libs/algorithm/Cargo.toml +rust/libs/algorithm/src/lib.rs +rust/libs/algorithms/faiss/Cargo.toml +rust/libs/algorithms/faiss/src/lib.rs +rust/libs/algorithms/ngt/Cargo.toml +rust/libs/algorithms/ngt/build.rs +rust/libs/algorithms/ngt/src/input.cpp +rust/libs/algorithms/ngt/src/input.h +rust/libs/algorithms/ngt/src/lib.rs +rust/libs/observability/Cargo.toml +rust/libs/observability/src/config.rs +rust/libs/observability/src/lib.rs +rust/libs/observability/src/macros.rs +rust/libs/observability/src/observability.rs rust/libs/proto/Cargo.toml rust/libs/proto/src/core.v1.tonic.rs rust/libs/proto/src/discoverer.v1.tonic.rs rust/libs/proto/src/filter.egress.v1.tonic.rs rust/libs/proto/src/filter.ingress.v1.tonic.rs rust/libs/proto/src/lib.rs +rust/libs/proto/src/meta.v1.tonic.rs rust/libs/proto/src/mirror.v1.tonic.rs rust/libs/proto/src/payload.v1.rs rust/libs/proto/src/rpc.v1.rs @@ -1965,7 +2016,7 @@ versions/PROTOBUF_VERSION versions/REVIEWDOG_VERSION versions/RUST_VERSION versions/TELEPRESENCE_VERSION -versions/VALDCLI_VERSION +versions/USEARCH_VERSION versions/VALD_VERSION versions/YQ_VERSION versions/ZLIB_VERSION diff --git 
a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index fb1b90eea9..a6a944d13b 100755 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -22,11 +22,11 @@ assignees: "" -- Vald Version: v1.7.13 -- Go Version: v1.23.0 -- Rust Version: v1.80.0 -- Docker Version: v27.1.1 -- Kubernetes Version: v1.30.3 -- Helm Version: v3.15.3 -- NGT Version: v2.2.4 -- Faiss Version: v1.8.0 +- Vald Version: v1.7.15 +- Go Version: v1.23.4 +- Rust Version: v1.83.0 +- Docker Version: v27.4.0 +- Kubernetes Version: v1.32.0 +- Helm Version: v3.16.3 +- NGT Version: v2.3.5 +- Faiss Version: v1.9.0 diff --git a/.github/ISSUE_TEMPLATE/security_issue_report.md b/.github/ISSUE_TEMPLATE/security_issue_report.md index dae86630d3..161f83c4c6 100644 --- a/.github/ISSUE_TEMPLATE/security_issue_report.md +++ b/.github/ISSUE_TEMPLATE/security_issue_report.md @@ -16,11 +16,11 @@ assignees: "" -- Vald Version: v1.7.13 -- Go Version: v1.23.0 -- Rust Version: v1.80.0 -- Docker Version: v27.1.1 -- Kubernetes Version: v1.30.3 -- Helm Version: v3.15.3 -- NGT Version: v2.2.4 -- Faiss Version: v1.8.0 +- Vald Version: v1.7.15 +- Go Version: v1.23.4 +- Rust Version: v1.83.0 +- Docker Version: v27.4.0 +- Kubernetes Version: v1.32.0 +- Helm Version: v3.16.3 +- NGT Version: v2.3.5 +- Faiss Version: v1.9.0 diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 7b3bc08352..bdd064fb1e 100755 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -3,7 +3,7 @@ ### Description - + ### Related Issue @@ -15,14 +15,14 @@ ### Versions -- Vald Version: v1.7.13 -- Go Version: v1.23.0 -- Rust Version: v1.80.0 -- Docker Version: v27.1.1 -- Kubernetes Version: v1.30.3 -- Helm Version: v3.15.3 -- NGT Version: v2.2.4 -- Faiss Version: v1.8.0 +- Vald Version: v1.7.15 +- Go Version: v1.23.4 +- Rust Version: v1.83.0 +- Docker Version: v27.4.0 +- Kubernetes Version: v1.32.0 +- Helm Version: v3.16.3 +- NGT Version: v2.3.5 +- Faiss Version: v1.9.0 ### Checklist @@ -34,4 +34,4 @@ ### Special notes for your reviewer - + diff --git a/.github/actions/deploy-chaos-mesh/action.yaml b/.github/actions/deploy-chaos-mesh/action.yaml index 9de5de8e61..ad83644b86 100644 --- a/.github/actions/deploy-chaos-mesh/action.yaml +++ b/.github/actions/deploy-chaos-mesh/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Deploy Chaos Mesh" -description: "A action to deploy Chaos Mesh" +description: "An action to deploy Chaos Mesh" inputs: chaos_mesh_version: description: "The Chaos Mesh version to use. The default version is `versions/CHAOS_MESH_VERSION`" diff --git a/.github/actions/detect-docker-image-tags/action.yaml b/.github/actions/detect-docker-image-tags/action.yaml index 896d9212a1..fbadf512be 100644 --- a/.github/actions/detect-docker-image-tags/action.yaml +++ b/.github/actions/detect-docker-image-tags/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. 
# name: "Detect Docker image tags" -description: "A action to detect Docker image tags" +description: "An action to detect Docker image tags" inputs: tags: description: "Tag name to check whether exists or not" @@ -48,6 +48,7 @@ runs: ["vdaas/vald-mirror-gateway"]="gateway.mirror.image.tag" ["vdaas/vald-manager-index"]="manager.index.image.tag" ["vdaas/vald-index-creation"]="manager.index.creator.image.tag" + ["vdaas/vald-index-deletion"]="manager.index.delete.image.tag" ["vdaas/vald-index-save"]="manager.index.saver.image.tag" ["vdaas/vald-readreplica-rotate"]="manager.index.readreplica.rotator.image.tag" ["vdaas/vald-helm-operator"]="image.tag" diff --git a/.github/actions/determine-docker-image-tag/action.yaml b/.github/actions/determine-docker-image-tag/action.yaml index e385af0e66..300830a56e 100644 --- a/.github/actions/determine-docker-image-tag/action.yaml +++ b/.github/actions/determine-docker-image-tag/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Determine Docker image tag" -description: "A action to determine Docker image tag" +description: "An action to determine Docker image tag" outputs: TAGS: description: "docker image tag list" diff --git a/.github/actions/docker-build/action.yaml b/.github/actions/docker-build/action.yaml index 84792b6044..bceee56c6e 100644 --- a/.github/actions/docker-build/action.yaml +++ b/.github/actions/docker-build/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Build Docker images" -description: "A action to build Docker images and publish them" +description: "An action to build Docker images and publish them" inputs: target: description: "Build target" diff --git a/.github/actions/dump-context/action.yaml b/.github/actions/dump-context/action.yaml index 4afd277e44..861f974c81 100644 --- a/.github/actions/dump-context/action.yaml +++ b/.github/actions/dump-context/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Dump context to log" -description: "A action to dump context to log" +description: "An action to dump context to log" runs: using: "composite" steps: diff --git a/.github/actions/e2e-deploy-vald-helm-operator/action.yaml b/.github/actions/e2e-deploy-vald-helm-operator/action.yaml index 7c056b78a2..43bc4c78c7 100644 --- a/.github/actions/e2e-deploy-vald-helm-operator/action.yaml +++ b/.github/actions/e2e-deploy-vald-helm-operator/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Deploy Vald using vald-helm-operator for E2E test" -description: "A action to deploy vald using vald-helm-operator for E2E test" +description: "An action to deploy vald using vald-helm-operator for E2E test" inputs: require_minio: description: "If Minio is required, set this to true." diff --git a/.github/actions/e2e-deploy-vald-readreplica/action.yaml b/.github/actions/e2e-deploy-vald-readreplica/action.yaml index 36fc7ba486..e6db2c2c25 100644 --- a/.github/actions/e2e-deploy-vald-readreplica/action.yaml +++ b/.github/actions/e2e-deploy-vald-readreplica/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Deploy Vald Read Replica for E2E test" -description: "A action to deploy vald read replica for E2E test" +description: "An action to deploy vald read replica for E2E test" inputs: require_minio: description: "If Minio is required, set this to true." 
diff --git a/.github/actions/e2e-deploy-vald/action.yaml b/.github/actions/e2e-deploy-vald/action.yaml index 5048653531..08ab6efe33 100644 --- a/.github/actions/e2e-deploy-vald/action.yaml +++ b/.github/actions/e2e-deploy-vald/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Deploy Vald for E2E test" -description: "A action to deploy vald for E2E test" +description: "An action to deploy vald for E2E test" inputs: require_minio: description: "If Minio is required, set this to true." diff --git a/.github/actions/notify-slack/action.yaml b/.github/actions/notify-slack/action.yaml index 6344ce1620..7b3e15c417 100644 --- a/.github/actions/notify-slack/action.yaml +++ b/.github/actions/notify-slack/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Notify Slack" -description: "A action to notify Slack" +description: "An action to notify Slack" inputs: author_name: description: "User name for slack notification" diff --git a/.github/actions/scan-docker-image/action.yaml b/.github/actions/scan-docker-image/action.yaml index 905e11c18a..a2791286dc 100644 --- a/.github/actions/scan-docker-image/action.yaml +++ b/.github/actions/scan-docker-image/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Scan the Docker image" -description: "A action to scan the Docker image" +description: "An action to scan the Docker image" inputs: image_ref: description: "Docker image reference" diff --git a/.github/actions/setup-e2e/action.yaml b/.github/actions/setup-e2e/action.yaml index b9bafa2c44..343b091867 100644 --- a/.github/actions/setup-e2e/action.yaml +++ b/.github/actions/setup-e2e/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Setup E2E environment" -description: "A action to set up the environment for executing E2E test" +description: "An action to set up the environment for executing E2E test" inputs: require_libhdf5: description: "If libhdf5 is not required, set this to false" diff --git a/.github/actions/setup-go/action.yaml b/.github/actions/setup-go/action.yaml index a7a07b841b..edd1662a89 100644 --- a/.github/actions/setup-go/action.yaml +++ b/.github/actions/setup-go/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Setup Go environment" -description: "A action to set up Go environment" +description: "An action to set up Go environment" inputs: go_version: description: "The Go version to use. 
The default version is `versions/GO_VERSION`" @@ -28,14 +28,25 @@ runs: shell: bash run: | GO_VERSION=${GO_VERSION:-`make version/go`} - echo "version=${GO_VERSION}" >> $GITHUB_OUTPUT env: GO_VERSION: ${{ inputs.go_version }} - - uses: actions/setup-go@v5 + - name: Check if Go is installed + id: check_go + shell: bash + run: | + if command -v go &> /dev/null; then + echo "go_installed=true" >> $GITHUB_OUTPUT + echo "installed_version=$(go version | awk '{print $3}')" >> $GITHUB_OUTPUT + else + echo "go_installed=false" >> $GITHUB_OUTPUT + fi + - name: Setup Go (if not installed or version mismatch) + if: steps.check_go.outputs.go_installed == 'false' || steps.check_go.outputs.installed_version != steps.go_version.outputs.version + uses: actions/setup-go@v5 with: go-version: ${{ steps.go_version.outputs.version }} - - name: Check Go version + - name: Verify Go version shell: bash run: | go version diff --git a/.github/actions/setup-helm/action.yaml b/.github/actions/setup-helm/action.yaml index bba7faa5b0..14ef0fd5fa 100644 --- a/.github/actions/setup-helm/action.yaml +++ b/.github/actions/setup-helm/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Setup Helm environment" -description: "A action to set up Helm environment" +description: "An action to set up Helm environment" inputs: helm_version: description: "The Helm version to use. The default version is `versions/HELM_VERSION`" @@ -28,14 +28,25 @@ runs: shell: bash run: | HELM_VERSION=${HELM_VERSION:-`make version/helm`} - echo "version=${HELM_VERSION}" >> $GITHUB_OUTPUT env: HELM_VERSION: ${{ inputs.helm_version }} - - uses: azure/setup-helm@v4 + - name: Check if Helm is installed + id: check_helm + shell: bash + run: | + if command -v helm &> /dev/null; then + echo "helm_installed=true" >> $GITHUB_OUTPUT + echo "installed_version=$(helm version --short --client | awk '{print $2}' | sed 's/^v//')" >> $GITHUB_OUTPUT + else + echo "helm_installed=false" >> $GITHUB_OUTPUT + fi + - name: Setup Helm (if not installed or version mismatch) + if: steps.check_helm.outputs.helm_installed == 'false' || steps.check_helm.outputs.installed_version != steps.helm_version.outputs.version + uses: azure/setup-helm@v4 with: version: ${{ steps.helm_version.outputs.version }} - - name: Check Helm version + - name: Verify Helm version shell: bash run: | - helm version + helm version --short --client diff --git a/.github/actions/setup-k3d/action.yaml b/.github/actions/setup-k3d/action.yaml index c28b4d1095..19f0d0cf87 100644 --- a/.github/actions/setup-k3d/action.yaml +++ b/.github/actions/setup-k3d/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. 
# name: "Setup k3d environment" -description: "A action to set up k3d (k3s in Docker)" +description: "An action to set up k3d (k3s in Docker)" inputs: version: description: "k3d version" @@ -67,12 +67,21 @@ runs: echo "tag=${K3S_VERSION}" >> $GITHUB_OUTPUT env: K3S_VERSION: ${{ inputs.k3s_version }} - - name: Install k3d + - name: Check if k3d is installed + id: check_k3d shell: bash run: | - curl -s ${REPO_URL} | ${{ steps.k3d_version.outputs.tag }} bash - env: - REPO_URL: "https://raw.githubusercontent.com/k3d-io/k3d/main/install.sh" + if command -v k3d &> /dev/null; then + echo "k3d_installed=true" >> $GITHUB_OUTPUT + echo "installed_version=$(k3d version --short | sed 's/^v//')" >> $GITHUB_OUTPUT + else + echo "k3d_installed=false" >> $GITHUB_OUTPUT + fi + - name: Install k3d (if not installed or version mismatch) + if: steps.check_k3d.outputs.k3d_installed == 'false' || steps.check_k3d.outputs.installed_version != steps.k3d_version.outputs.tag + shell: bash + run: | + make k3d/install - name: Check k3d version shell: bash run: | diff --git a/.github/actions/setup-yq/action.yaml b/.github/actions/setup-yq/action.yaml index 64c2911f88..8a274b3030 100644 --- a/.github/actions/setup-yq/action.yaml +++ b/.github/actions/setup-yq/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Setup yq environment" -description: "A action to set up yq" +description: "An action to set up yq" inputs: yq_version: description: "The yq version to use. The default version is `versions/YQ_VERSION`" diff --git a/.github/actions/wait-for-docker-image/action.yaml b/.github/actions/wait-for-docker-image/action.yaml index be9b7c259d..5b57fbe467 100644 --- a/.github/actions/wait-for-docker-image/action.yaml +++ b/.github/actions/wait-for-docker-image/action.yaml @@ -14,7 +14,7 @@ # limitations under the License. # name: "Wait for Docker Images" -description: "A action to wait for Docker images to be published" +description: "An action to wait for Docker images to be published" inputs: images: description: "image names" diff --git a/.github/dependabot.yml b/.github/dependabot.yml deleted file mode 100644 index 71224536e0..0000000000 --- a/.github/dependabot.yml +++ /dev/null @@ -1,182 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -version: 2 -updates: - - package-ecosystem: gomod - directory: "/" - schedule: - interval: daily - open-pull-requests-limit: 10 - reviewers: - - kpango - - kmrmt - labels: - - type/dependency - ignore: - - dependency-name: github.com/aws/aws-sdk-go - versions: - - "> 1.31.4, < 1.32" - - dependency-name: github.com/aws/aws-sdk-go - versions: - - ">= 1.36.a, < 1.37" - - dependency-name: github.com/aws/aws-sdk-go - versions: - - 1.37.1 - - 1.37.10 - - 1.37.11 - - 1.37.12 - - 1.37.13 - - 1.37.14 - - 1.37.15 - - 1.37.16 - - 1.37.17 - - 1.37.18 - - 1.37.19 - - 1.37.2 - - 1.37.20 - - 1.37.21 - - 1.37.22 - - 1.37.23 - - 1.37.24 - - 1.37.25 - - 1.37.26 - - 1.37.27 - - 1.37.28 - - 1.37.29 - - 1.37.3 - - 1.37.30 - - 1.37.31 - - 1.37.32 - - 1.37.33 - - 1.37.5 - - 1.37.6 - - 1.37.7 - - 1.37.8 - - 1.37.9 - - 1.38.0 - - 1.38.1 - - 1.38.11 - - 1.38.12 - - 1.38.13 - - 1.38.14 - - 1.38.15 - - 1.38.17 - - 1.38.18 - - 1.38.19 - - 1.38.2 - - 1.38.20 - - 1.38.21 - - 1.38.22 - - 1.38.24 - - 1.38.25 - - 1.38.26 - - 1.38.27 - - 1.38.28 - - 1.38.3 - - 1.38.4 - - 1.38.6 - - 1.38.7 - - 1.38.8 - - 1.38.9 - - dependency-name: github.com/json-iterator/go - versions: - - 1.1.11 - - dependency-name: github.com/envoyproxy/protoc-gen-validate - versions: - - 0.5.0 - - 0.5.1 - - 0.6.0 - - 0.6.1 - - dependency-name: github.com/go-redis/redis/v8 - versions: - - 8.5.0 - - 8.8.0 - - 8.8.2 - - dependency-name: k8s.io/apimachinery - versions: - - 0.20.3 - - 0.20.4 - - 0.20.5 - - 0.21.0 - - dependency-name: k8s.io/api - versions: - - 0.20.3 - - 0.20.4 - - 0.20.5 - - 0.21.0 - - dependency-name: google.golang.org/grpc - versions: - - 1.36.0 - - 1.36.1 - - 1.37.0 - - dependency-name: google.golang.org/api - versions: - - 0.38.0 - - 0.39.0 - - 0.40.0 - - 0.41.0 - - 0.42.0 - - 0.43.0 - - 0.44.0 - - dependency-name: cloud.google.com/go - versions: - - 0.76.0 - - 0.77.0 - - 0.78.0 - - 0.79.0 - - 0.80.0 - - 0.81.0 - - dependency-name: github.com/kpango/glg - versions: - - 1.5.4 - - 1.5.5 - - 1.5.6 - - 1.5.7 - - 1.5.8 - - dependency-name: github.com/hashicorp/go-version - versions: - - 1.3.0 - - dependency-name: github.com/go-sql-driver/mysql - versions: - - 1.6.0 - - dependency-name: github.com/golang/protobuf - versions: - - 1.5.1 - - 1.5.2 - - dependency-name: k8s.io/client-go - versions: - - 0.20.3 - - 0.20.4 - - 0.20.5 - - dependency-name: gonum.org/v1/plot - versions: - - 0.9.0 - - dependency-name: contrib.go.opencensus.io/exporter/prometheus - versions: - - 0.3.0 - - dependency-name: github.com/google/go-cmp - versions: - - 0.5.5 - - dependency-name: go.opencensus.io - versions: - - 0.22.6 - - 0.23.0 - - dependency-name: github.com/kpango/gache - versions: - - 1.2.5 - - dependency-name: go.uber.org/automaxprocs - versions: - - 1.4.0 diff --git a/.github/helm/values/values-correction.yaml b/.github/helm/values/values-correction.yaml index 7b986f0da1..c412d4ef61 100644 --- a/.github/helm/values/values-correction.yaml +++ b/.github/helm/values/values-correction.yaml @@ -16,7 +16,7 @@ defaults: logging: - level: info + level: debug networkPolicy: enabled: true gateway: diff --git a/.github/labeler.yml b/.github/labeler.yml deleted file mode 100644 index 2ccd45565e..0000000000 --- a/.github/labeler.yml +++ /dev/null @@ -1,181 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -area/agent/core/faiss: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/proto/v1/agent/core/**/* - - cmd/agent/core/faiss/* - - pkg/agent/core/faiss/**/* - - pkg/agent/internal/**/* -area/agent/core/ngt: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/proto/v1/agent/core/**/* - - cmd/agent/core/ngt/* - - pkg/agent/core/ngt/**/* - - pkg/agent/internal/**/* -area/agent/sidecar: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/sidecar/**/* - - apis/proto/v1/agent/sidecar/**/* - - cmd/agent/sidecar/**/* - - pkg/agent/sidecar/**/* -area/discoverer: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/discoverer/**/* - - apis/proto/v1/discoverer/**/* - - cmd/discoverer/**/* - - pkg/discoverer/**/* -area/gateway/lb: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/gateway/lb/**/* - - pkg/gateway/lb/**/* -area/gateway/filter: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/filter/egress/* - - apis/grpc/v1/filter/ingress/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/filter/egress/* - - apis/proto/v1/filter/ingress/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/gateway/filter/**/* - - pkg/gateway/filter/**/* -area/gateway/mirror: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/gateway/mirror/**/* - - pkg/gateway/mirror/**/* -area/manager/index: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/manager/index/**/* - - apis/grpc/v1/payload/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/manager/index/**/* - - apis/proto/v1/payload/**/* - - cmd/manager/index/**/* - - pkg/manager/index/**/* -area/index/job/correction: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/agent/core/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/index/job/correction/* - - pkg/index/job/correction/**/* -area/index/job/creation: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/agent/core/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/index/job/creation/* - - pkg/index/job/creation/**/* -area/index/job/save: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/agent/core/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - 
cmd/index/job/save/* - - pkg/index/job/save/**/* -area/index/job/readreplica: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/agent/core/**/* - - apis/grpc/v1/discoverer/**/* - - apis/grpc/v1/payload/**/* - - apis/grpc/v1/vald/**/* - - apis/proto/v1/agent/core/**/* - - apis/proto/v1/discoverer/**/* - - apis/proto/v1/payload/**/* - - apis/proto/v1/vald/**/* - - cmd/index/job/readreplica/**/* - - pkg/index/job/readreplica/**/* -area/filter/egress: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/filter/egress/* - - apis/grpc/v1/payload/**/* - - apis/proto/v1/filter/egress/* - - apis/proto/v1/payload/**/* -area/filter/ingress: - - changed-files: - - any-glob-to-any-file: - - apis/grpc/v1/filter/ingress/* - - apis/grpc/v1/payload/**/* - - apis/proto/v1/filter/ingress/* - - apis/proto/v1/payload/**/* -area/tools/cli/loadtest: - - changed-files: - - any-glob-to-any-file: - - apis/proto/v1/agent/core/**/* - - apis/proto/v1/vald/**/* - - apis/grpc/v1/agent/core/**/* - - apis/grpc/v1/vald/**/* - - cmd/tools/cli/loadtest/**/* - - pkg/tools/cli/loadtest/**/* -area/internal: - - changed-files: - - any-glob-to-any-file: - - internal/**/* -area/helm: - - changed-files: - - any-glob-to-any-file: - - charts/**/* -area/makefile: - - changed-files: - - any-glob-to-any-file: - - Makefile - - Makefile.d/**/* -type/ci: - - changed-files: - - any-glob-to-any-file: - - .github/**/* diff --git a/.github/workflows/_detect-ci-container.yml b/.github/workflows/_detect-ci-container.yml deleted file mode 100644 index 06e27c1602..0000000000 --- a/.github/workflows/_detect-ci-container.yml +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Detect CI container image tag" -on: - workflow_call: - outputs: - TAG: - description: "The docker image tag name" - value: ${{ jobs.detect.outputs.TAG }} -env: - TARGET_IMAGE: vdaas/vald-ci-container -jobs: - detect: - runs-on: ubuntu-latest - outputs: - TAG: ${{ steps.get_tag_name.outputs.TAG }} - steps: - - uses: actions/checkout@v4 - - name: Determine Docker image tag - id: determine_tag_name - uses: ./.github/actions/determine-docker-image-tag - - name: Detect Docker image tag - id: detect_tag_name - uses: ./.github/actions/detect-docker-image-tags - with: - images: ${{ env.TARGET_IMAGE }} - tags: ${{ steps.determine_tag_name.outputs.TAGS }} - - name: Get Docker image tag from detection result - id: get_tag_name - run: | - TAG=$(echo "$TAGS" | awk '{print $1}' | awk -F '=' '{print $2}') - echo "TAG=${TAG}" - echo "TAG=${TAG}" >> $GITHUB_OUTPUT - env: - TAGS: ${{ steps.detect_tag_name.outputs.IMAGE_TAGS }} diff --git a/.github/workflows/_release-pr.yml b/.github/workflows/_release-pr.yml deleted file mode 100644 index 5bbd150d05..0000000000 --- a/.github/workflows/_release-pr.yml +++ /dev/null @@ -1,134 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Create release PR" -on: - workflow_call: - inputs: - release_branch_name: - type: string - description: "The release branch name. 
e.g release/v1.7" - required: true - release_tag: - type: string - description: "The release tag" - required: true -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/dump-context - detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml - secrets: inherit - create: - needs: - - dump-contexts-to-log - - detect-ci-container - runs-on: ubuntu-latest - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - env: - RELEASE_BRANCH_NAME: ${{ inputs.release_branch_name }} - PREPARE_RELEASE_BRANCH_NAME: prepare/${{ inputs.release_branch_name }} - RELEASE_TAG: ${{ inputs.release_tag }} - steps: - - uses: actions/checkout@v4 - with: - fetch-depth: 0 - token: ${{ secrets.DISPATCH_TOKEN }} - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: crazy-max/ghaction-import-gpg@v6 - with: - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - git_user_signingkey: true - git_commit_gpgsign: true - - name: Update for new release - id: update_for_new_release - run: | - git checkout ${RELEASE_BRANCH_NAME} - git checkout -b ${PREPARE_RELEASE_BRANCH_NAME} && git push origin ${PREPARE_RELEASE_BRANCH_NAME} - - LAST_COMMIT_MESSAGE=`git log --pretty=format:%s -1` - PR_NUM=`echo "${LAST_COMMIT_MESSAGE}" | grep -o "#[[:digit:]]\+" | sed -e 's/#//' | head -1` - - PREVIOUS_VERSION=`cat versions/VALD_VERSION` - echo "${RELEASE_TAG}" > versions/VALD_VERSION - - sed -i -e "s/^version: .*$/version: ${RELEASE_TAG}/" charts/vald/Chart.yaml - sed -i -r "s/^(\s*)tag\s*:\s*v[0-9]+\.[0-9]+\.[0-9]+\s*$/\1tag: ${RELEASE_TAG}/" charts/vald/values.yaml - sed -i -e "s/^version: .*$/version: ${RELEASE_TAG}/" charts/vald-helm-operator/Chart.yaml - sed -i -r "s/^(\s*)tag\s*:\s*v[0-9]+\.[0-9]+\.[0-9]+\s*$/\1tag: ${RELEASE_TAG}/" charts/vald-helm-operator/values.yaml - sed -i -e "s/^version: .*$/version: ${RELEASE_TAG}/" charts/vald-readreplica/Chart.yaml - sed -i -e "s/^version: .*$/version: ${RELEASE_TAG}/" charts/vald-benchmark-operator/Chart.yaml - sed -i -r "s/^(\s*)tag\s*:\s*v[0-9]+\.[0-9]+\.[0-9]+\s*$/\1tag: ${RELEASE_TAG}/" charts/vald-benchmark-operator/values.yaml - - make helm/schema/all - make helm/schema/crd/all - make k8s/manifest/update - make k8s/manifest/helm-operator/update - make k8s/manifest/benchmark-operator/update - make k8s/manifest/readreplica/update - make helm/docs/vald - make helm/docs/vald-helm-operator - make helm/docs/vald-benchmark-operator - make helm/docs/vald-readreplica - - BODY="" - if [ ! 
-z "${PR_NUM}" ]; then - BODY=`curl -s -H "Authorization: Bearer ${GITHUB_TOKEN}" "https://api.github.com/repos/${GITHUB_REPOSITORY}/pulls/${PR_NUM}" | jq -r '.body'` - fi - - if [ -z "$BODY" ]; then - BODY=`git log --pretty=format:'- %s' ${PREVIOUS_VERSION}..${RELEASE_BRANCH_NAME} | grep "#[[:digit:]]\+" | sed -e "s/\[\(patch\|minor\|major\)\] *//g" | sed -e "s%#\([[:digit:]]\+\)%[&](https://github.com/vdaas/vald/pull/\1)%"` - fi - - CHANGELOG=`make changelog/next/print BODY="$BODY"` - make changelog/update BODY="$BODY" - - make format - - git add \ - CHANGELOG.md \ - charts \ - k8s \ - versions/VALD_VERSION - git commit -S --signoff -m ":bookmark: :robot: Release ${RELEASE_TAG}" - git push -u origin ${PREPARE_RELEASE_BRANCH_NAME} - - PR_NUM=$(curl --fail \ - -H "Accept: application/json" \ - -H "Content-Type:application/json" \ - -H "Authorization: token ${GITHUB_TOKEN}" \ - --request POST \ - --data "{\"title\": \":bookmark: :robot: Release ${RELEASE_TAG}\", \"head\": \"${PREPARE_RELEASE_BRANCH_NAME}\", \"base\": \"${RELEASE_BRANCH_NAME}\", \"body\": \"Release PR for ${RELEASE_TAG}.\", \"maintainer_can_modify\": true}" \ - ${API_BASE_URL}/pulls | jq '.number' ) - echo ${PR_NUM} - - curl --fail \ - -H "Accept: application/json" \ - -H "Content-Type:application/json" \ - -H "Authorization: token ${GITHUB_TOKEN}" \ - --request POST \ - --data "{\"labels\":[\"${BACKPORT_MAIN_LABEL_NAME}\"]}" \ - ${API_BASE_URL}/issues/${PR_NUM}/labels - env: - GITHUB_USER: ${{ secrets.DISPATCH_USER }} - GITHUB_TOKEN: ${{ secrets.DISPATCH_TOKEN }} - BACKPORT_MAIN_LABEL_NAME: actions/backport/main - API_BASE_URL: https://api.github.com/repos/vdaas/vald diff --git a/.github/workflows/dockers-agent-faiss-image.yaml b/.github/workflows/dockers-agent-faiss-image.yaml new file mode 100644 index 0000000000..153ee05a14 --- /dev/null +++ b/.github/workflows/dockers-agent-faiss-image.yaml @@ -0,0 +1,268 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: agent-faiss" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-faiss-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/agent/core/faiss/*.go + - dockers/agent/core/faiss/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/core/algorithm/faiss/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/core/faiss/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - 
internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/agent/core/faiss/config/*.go + - pkg/agent/core/faiss/handler/grpc/*.go + - pkg/agent/core/faiss/handler/rest/*.go + - pkg/agent/core/faiss/router/*.go + - pkg/agent/core/faiss/service/*.go + - pkg/agent/core/faiss/usecase/*.go + - pkg/agent/internal/kvs/*.go + - pkg/agent/internal/memstore/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/internal/vqueue/*.go + - versions/FAISS_VERSION + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-faiss-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/agent/core/faiss/*.go + - dockers/agent/core/faiss/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/core/algorithm/faiss/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/core/faiss/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + 
- internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/agent/core/faiss/config/*.go + - pkg/agent/core/faiss/handler/grpc/*.go + - pkg/agent/core/faiss/handler/rest/*.go + - pkg/agent/core/faiss/router/*.go + - pkg/agent/core/faiss/service/*.go + - pkg/agent/core/faiss/usecase/*.go + - pkg/agent/internal/kvs/*.go + - pkg/agent/internal/memstore/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/internal/vqueue/*.go + - versions/FAISS_VERSION + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: agent-faiss + secrets: inherit diff --git a/.github/workflows/dockers-agent-faiss-image.yml b/.github/workflows/dockers-agent-faiss-image.yml deleted file mode 100644 index 49dc0ae3db..0000000000 --- a/.github/workflows/dockers-agent-faiss-image.yml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: agent-faiss" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-faiss-image.yml" - - "Makefile" - - "apis/grpc/**" - - "cmd/agent/core/faiss/**" - - "dockers/agent/core/faiss/Dockerfile" - - "go.mod" - - "go.sum" - - "internal/**" - - "pkg/agent/core/faiss/**" - - "versions/FAISS_VERSION" - - "versions/GO_VERSION" - pull_request_target: - paths: - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-faiss-image.yml" - - "Makefile" - - "apis/grpc/**" - - "cmd/agent/core/faiss/**" - - "dockers/agent/core/faiss/Dockerfile" - - "go.mod" - - "go.sum" - - "internal/**" - - "pkg/agent/core/faiss/**" - - "versions/FAISS_VERSION" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: agent-faiss - secrets: inherit diff --git a/.github/workflows/dockers-agent-image.yaml b/.github/workflows/dockers-agent-image.yaml new file mode 100644 index 0000000000..f1311bf7fb --- /dev/null +++ b/.github/workflows/dockers-agent-image.yaml @@ -0,0 +1,74 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: agent" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-image.yaml + - Makefile + - Makefile.d/** + - apis/proto/** + - dockers/agent/core/agent/Dockerfile + - hack/docker/gen/main.go + - rust/Cargo.lock + - rust/Cargo.toml + - rust/bin/agent + - rust/libs/ngt-rs/** + - rust/libs/ngt/** + - rust/libs/proto/** + - versions/FAISS_VERSION + - versions/NGT_VERSION + - versions/RUST_VERSION + pull_request_target: + paths: + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-image.yaml + - Makefile + - Makefile.d/** + - apis/proto/** + - dockers/agent/core/agent/Dockerfile + - hack/docker/gen/main.go + - rust/Cargo.lock + - rust/Cargo.toml + - rust/bin/agent + - rust/libs/ngt-rs/** + - rust/libs/ngt/** + - rust/libs/proto/** + - versions/FAISS_VERSION + - versions/NGT_VERSION + - versions/RUST_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: agent + secrets: inherit diff --git a/.github/workflows/dockers-agent-image.yml b/.github/workflows/dockers-agent-image.yml deleted file mode 100644 index 1c7e8ed85b..0000000000 --- a/.github/workflows/dockers-agent-image.yml +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Build docker image: agent" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-image.yml" - - "Makefile" - - "dockers/agent/core/agent/Dockerfile" - - "rust/Cargo.lock" - - "rust/Cargo.toml" - - "rust/bin/agent/**" - - "rust/libs/ngt-rs/**" - - "rust/libs/ngt/**" - - "rust/libs/proto/**" - - "versions/FAISS_VERSION" - - "versions/NGT_VERSION" - - "versions/RUST_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-image.yml" - - "Makefile" - - "dockers/agent/core/agent/Dockerfile" - - "rust/Cargo.lock" - - "rust/Cargo.toml" - - "rust/bin/agent/**" - - "rust/libs/ngt-rs/**" - - "rust/libs/ngt/**" - - "rust/libs/proto/**" - - "versions/FAISS_VERSION" - - "versions/NGT_VERSION" - - "versions/RUST_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: agent - secrets: inherit diff --git a/.github/workflows/dockers-agent-ngt-image.yaml b/.github/workflows/dockers-agent-ngt-image.yaml new file mode 100644 index 0000000000..4610cb8399 --- /dev/null +++ b/.github/workflows/dockers-agent-ngt-image.yaml @@ -0,0 +1,272 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: agent-ngt" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-ngt-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/agent/core/ngt/*.go + - dockers/agent/core/ngt/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/core/algorithm/ngt/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/core/ngt/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go 
+ - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/agent/core/ngt/config/*.go + - pkg/agent/core/ngt/handler/grpc/*.go + - pkg/agent/core/ngt/handler/rest/*.go + - pkg/agent/core/ngt/router/*.go + - pkg/agent/core/ngt/service/*.go + - pkg/agent/core/ngt/usecase/*.go + - pkg/agent/internal/kvs/*.go + - pkg/agent/internal/memstore/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/internal/vqueue/*.go + - versions/GO_VERSION + - versions/NGT_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-ngt-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/agent/core/ngt/*.go + - dockers/agent/core/ngt/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/core/algorithm/ngt/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/core/ngt/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + 
- internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/agent/core/ngt/config/*.go + - pkg/agent/core/ngt/handler/grpc/*.go + - pkg/agent/core/ngt/handler/rest/*.go + - pkg/agent/core/ngt/router/*.go + - pkg/agent/core/ngt/service/*.go + - pkg/agent/core/ngt/usecase/*.go + - pkg/agent/internal/kvs/*.go + - pkg/agent/internal/memstore/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/internal/vqueue/*.go + - versions/GO_VERSION + - versions/NGT_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: agent-ngt + secrets: inherit diff --git a/.github/workflows/dockers-agent-ngt-image.yml b/.github/workflows/dockers-agent-ngt-image.yml deleted file mode 100644 index f3e7f49f1c..0000000000 --- a/.github/workflows/dockers-agent-ngt-image.yml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: agent-ngt" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-ngt-image.yml" - - "Makefile" - - "apis/grpc/**" - - "cmd/agent/core/ngt/**" - - "dockers/agent/core/ngt/Dockerfile" - - "go.mod" - - "go.sum" - - "internal/**" - - "pkg/agent/core/ngt/**" - - "versions/NGT_VERSION" - - "versions/GO_VERSION" - pull_request_target: - paths: - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-ngt-image.yml" - - "Makefile" - - "apis/grpc/**" - - "cmd/agent/core/ngt/**" - - "dockers/agent/core/ngt/Dockerfile" - - "go.mod" - - "go.sum" - - "internal/**" - - "pkg/agent/core/ngt/**" - - "versions/NGT_VERSION" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: agent-ngt - secrets: inherit diff --git a/.github/workflows/dockers-agent-sidecar-image.yaml b/.github/workflows/dockers-agent-sidecar-image.yaml new file mode 100644 index 0000000000..a3a0f6a68e --- /dev/null +++ b/.github/workflows/dockers-agent-sidecar-image.yaml @@ -0,0 +1,298 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: agent-sidecar" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-sidecar-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/sidecar/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/agent/sidecar/*.go + - dockers/agent/sidecar/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/compress/*.go + - internal/compress/gob/*.go + - internal/compress/gzip/*.go + - internal/compress/lz4/*.go + - internal/compress/zstd/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/db/storage/blob/*.go + - internal/db/storage/blob/cloudstorage/*.go + - internal/db/storage/blob/cloudstorage/urlopener/*.go + - internal/db/storage/blob/s3/*.go + - internal/db/storage/blob/s3/reader/*.go + - internal/db/storage/blob/s3/reader/io/*.go + - internal/db/storage/blob/s3/sdk/s3/*.go + - internal/db/storage/blob/s3/sdk/s3/s3iface/*.go + - internal/db/storage/blob/s3/sdk/s3/s3manager/*.go + - internal/db/storage/blob/s3/session/*.go + - internal/db/storage/blob/s3/writer/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/file/watch/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/client/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - 
internal/net/http/routing/*.go + - internal/net/http/transport/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/sidecar/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/unit/*.go + - internal/version/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/sidecar/config/*.go + - pkg/agent/sidecar/handler/grpc/*.go + - pkg/agent/sidecar/handler/rest/*.go + - pkg/agent/sidecar/router/*.go + - pkg/agent/sidecar/service/observer/*.go + - pkg/agent/sidecar/service/restorer/*.go + - pkg/agent/sidecar/service/storage/*.go + - pkg/agent/sidecar/usecase/*.go + - pkg/agent/sidecar/usecase/initcontainer/*.go + - pkg/agent/sidecar/usecase/sidecar/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-agent-sidecar-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/sidecar/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/agent/sidecar/*.go + - dockers/agent/sidecar/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/compress/*.go + - internal/compress/gob/*.go + - internal/compress/gzip/*.go + - internal/compress/lz4/*.go + - internal/compress/zstd/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/db/storage/blob/*.go + - internal/db/storage/blob/cloudstorage/*.go + - internal/db/storage/blob/cloudstorage/urlopener/*.go + - internal/db/storage/blob/s3/*.go + - internal/db/storage/blob/s3/reader/*.go + - internal/db/storage/blob/s3/reader/io/*.go + - internal/db/storage/blob/s3/sdk/s3/*.go + - internal/db/storage/blob/s3/sdk/s3/s3iface/*.go + - internal/db/storage/blob/s3/sdk/s3/s3manager/*.go + - internal/db/storage/blob/s3/session/*.go + - internal/db/storage/blob/s3/writer/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/file/watch/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - 
internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/client/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/http/transport/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/agent/sidecar/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/unit/*.go + - internal/version/*.go + - pkg/agent/internal/metadata/*.go + - pkg/agent/sidecar/config/*.go + - pkg/agent/sidecar/handler/grpc/*.go + - pkg/agent/sidecar/handler/rest/*.go + - pkg/agent/sidecar/router/*.go + - pkg/agent/sidecar/service/observer/*.go + - pkg/agent/sidecar/service/restorer/*.go + - pkg/agent/sidecar/service/storage/*.go + - pkg/agent/sidecar/usecase/*.go + - pkg/agent/sidecar/usecase/initcontainer/*.go + - pkg/agent/sidecar/usecase/sidecar/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: agent-sidecar + secrets: inherit diff --git a/.github/workflows/dockers-agent-sidecar-image.yml b/.github/workflows/dockers-agent-sidecar-image.yml deleted file mode 100644 index d34feb6e93..0000000000 --- a/.github/workflows/dockers-agent-sidecar-image.yml +++ /dev/null @@ -1,67 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Build docker image: agent-sidecar" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-sidecar-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/db/storage/blob/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/agent/sidecar/**" - - "cmd/agent/sidecar/**" - - "dockers/agent/sidecar/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-agent-sidecar-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/db/storage/blob/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/agent/sidecar/**" - - "cmd/agent/sidecar/**" - - "dockers/agent/sidecar/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: agent-sidecar - secrets: inherit diff --git a/.github/workflows/dockers-benchmark-job-image.yaml b/.github/workflows/dockers-benchmark-job-image.yaml new file mode 100644 index 0000000000..9202808fe5 --- /dev/null +++ b/.github/workflows/dockers-benchmark-job-image.yaml @@ -0,0 +1,264 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: benchmark-job" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-benchmark-job-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/tools/benchmark/job/*.go + - dockers/tools/benchmark/job/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/benchmark/api/v1/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/client/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/http/transport/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - 
internal/sync/singleflight/*.go + - internal/test/data/hdf5/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/timeutil/rate/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/benchmark/job/config/*.go + - pkg/tools/benchmark/job/handler/grpc/*.go + - pkg/tools/benchmark/job/handler/rest/*.go + - pkg/tools/benchmark/job/router/*.go + - pkg/tools/benchmark/job/service/*.go + - pkg/tools/benchmark/job/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-benchmark-job-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/tools/benchmark/job/*.go + - dockers/tools/benchmark/job/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/benchmark/api/v1/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/client/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/http/transport/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - 
internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/test/data/hdf5/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/timeutil/rate/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/benchmark/job/config/*.go + - pkg/tools/benchmark/job/handler/grpc/*.go + - pkg/tools/benchmark/job/handler/rest/*.go + - pkg/tools/benchmark/job/router/*.go + - pkg/tools/benchmark/job/service/*.go + - pkg/tools/benchmark/job/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: benchmark-job + secrets: inherit diff --git a/.github/workflows/dockers-benchmark-job-image.yml b/.github/workflows/dockers-benchmark-job-image.yml deleted file mode 100644 index d39dcf66ed..0000000000 --- a/.github/workflows/dockers-benchmark-job-image.yml +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: benchmark-job" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-job-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/tools/benchmark/operator/**" - - "cmd/tools/benchmark/operator/**" - - "pkg/tools/benchmark/job/**" - - "cmd/tools/benchmark/job/**" - - "dockers/tools/benchmark/job/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-job-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/tools/benchmark/operator/**" - - "cmd/tools/benchmark/operator/**" - - "pkg/tools/benchmark/job/**" - - "cmd/tools/benchmark/job/**" - - "dockers/tools/benchmark/job/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: benchmark-job - secrets: inherit diff --git a/.github/workflows/dockers-benchmark-operator-image.yaml b/.github/workflows/dockers-benchmark-operator-image.yaml index a69c101686..403d2bb82a 100644 --- a/.github/workflows/dockers-benchmark-operator-image.yaml +++ b/.github/workflows/dockers-benchmark-operator-image.yaml @@ -13,50 +13,243 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + name: "Build docker image: benchmark-operator" on: push: branches: - main + - release/v*.* + - "!release/v*.*.*" tags: - "*.*.*" - - "v*.*.*" - "*.*.*-*" - - "v*.*.*-*" + - v*.*.* + - v*.*.*-* pull_request: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-operator-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/tools/benchmark/operator/**" - - "cmd/tools/benchmark/operator/**" - - "pkg/tools/benchmark/job/**" - - "cmd/tools/benchmark/job/**" - - "dockers/tools/benchmark/operator/Dockerfile" - - "versions/GO_VERSION" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-benchmark-operator-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/tools/benchmark/operator/*.go + - dockers/tools/benchmark/operator/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/job/*.go + - internal/k8s/vald/benchmark/api/v1/*.go + - internal/k8s/vald/benchmark/job/*.go + - internal/k8s/vald/benchmark/scenario/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - 
internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/tools/benchmark/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/benchmark/operator/config/*.go + - pkg/tools/benchmark/operator/handler/grpc/*.go + - pkg/tools/benchmark/operator/handler/rest/*.go + - pkg/tools/benchmark/operator/router/*.go + - pkg/tools/benchmark/operator/service/*.go + - pkg/tools/benchmark/operator/usecase/*.go + - versions/GO_VERSION pull_request_target: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-benchmak-operator-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/tools/benchmark/operator/**" - - "cmd/tools/benchmark/operator/**" - - "pkg/tools/benchmark/job/**" - - "cmd/tools/benchmark/job/**" - - "dockers/tools/benchmark/operator/Dockerfile" - - "versions/GO_VERSION" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-benchmark-operator-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/tools/benchmark/operator/*.go + - dockers/tools/benchmark/operator/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/job/*.go + - internal/k8s/vald/benchmark/api/v1/*.go + - internal/k8s/vald/benchmark/job/*.go + - internal/k8s/vald/benchmark/scenario/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + 
- internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/info/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/tools/benchmark/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/benchmark/operator/config/*.go + - pkg/tools/benchmark/operator/handler/grpc/*.go + - pkg/tools/benchmark/operator/handler/rest/*.go + - pkg/tools/benchmark/operator/router/*.go + - pkg/tools/benchmark/operator/service/*.go + - pkg/tools/benchmark/operator/usecase/*.go + - versions/GO_VERSION jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-binfmt-image.yaml b/.github/workflows/dockers-binfmt-image.yaml index cb165a86e8..d3934c1fe5 100644 --- a/.github/workflows/dockers-binfmt-image.yaml +++ b/.github/workflows/dockers-binfmt-image.yaml @@ -13,32 +13,49 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + name: "Build docker image: binfmt" on: schedule: - - cron: "0 * * * *" + - cron: 0 * * * * push: branches: - - "main" - - "release/v*.*" + - main + - release/v*.* - "!release/v*.*.*" tags: - "*.*.*" - - "v*.*.*" - "*.*.*-*" - - "v*.*.*-*" + - v*.*.* + - v*.*.*-* pull_request: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-binfmt-image.yml" - - "dockers/binfmt/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-binfmt-image.yaml + - apis/proto/** + - dockers/binfmt/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION pull_request_target: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-binfmt-image.yml" - - "dockers/binfmt/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-binfmt-image.yaml + - apis/proto/** + - dockers/binfmt/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-buildbase-image.yml b/.github/workflows/dockers-buildbase-image.yml deleted file mode 100644 index 609a0c7cae..0000000000 --- a/.github/workflows/dockers-buildbase-image.yml +++ /dev/null @@ -1,48 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: buildbase" -on: - schedule: - - cron: "0 * * * *" - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildbase-image.yml" - - "dockers/buildbase/Dockerfile" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildbase-image.yml" - - "dockers/buildbase/Dockerfile" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: buildbase - platforms: linux/amd64,linux/arm64 - secrets: inherit diff --git a/.github/workflows/dockers-buildkit-image.yaml b/.github/workflows/dockers-buildkit-image.yaml index 62a98d7f14..536e0bd2e0 100644 --- a/.github/workflows/dockers-buildkit-image.yaml +++ b/.github/workflows/dockers-buildkit-image.yaml @@ -13,32 +13,49 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + name: "Build docker image: buildkit" on: schedule: - - cron: "0 * * * *" + - cron: 0 * * * * push: branches: - - "main" - - "release/v*.*" + - main + - release/v*.* - "!release/v*.*.*" tags: - "*.*.*" - - "v*.*.*" - "*.*.*-*" - - "v*.*.*-*" + - v*.*.* + - v*.*.*-* pull_request: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildkit-image.yml" - - "dockers/buildkit/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-buildkit-image.yaml + - apis/proto/** + - dockers/buildkit/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION pull_request_target: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildkit-image.yml" - - "dockers/buildkit/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-buildkit-image.yaml + - apis/proto/** + - dockers/buildkit/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-buildkit-syft-scanner-image.yaml b/.github/workflows/dockers-buildkit-syft-scanner-image.yaml index fd5ebfbc4d..bf31e96122 100644 --- a/.github/workflows/dockers-buildkit-syft-scanner-image.yaml +++ b/.github/workflows/dockers-buildkit-syft-scanner-image.yaml @@ -13,32 +13,49 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + name: "Build docker image: buildkit-syft-scanner" on: schedule: - - cron: "0 * * * *" + - cron: 0 * * * * push: branches: - - "main" - - "release/v*.*" + - main + - release/v*.* - "!release/v*.*.*" tags: - "*.*.*" - - "v*.*.*" - "*.*.*-*" - - "v*.*.*-*" + - v*.*.* + - v*.*.*-* pull_request: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildkit-syft-scanner-image.yml" - - "dockers/buildkit/syft/scanner/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-buildkit-syft-scanner-image.yaml + - apis/proto/** + - dockers/buildkit/syft/scanner/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION pull_request_target: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-buildkit-syft-scanner-image.yml" - - "dockers/buildkit/syft/scanner/Dockerfile" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-buildkit-syft-scanner-image.yaml + - apis/proto/** + - dockers/buildkit/syft/scanner/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-ci-container-image.yml b/.github/workflows/dockers-ci-container-image.yml deleted file mode 100644 index adaa9f70c3..0000000000 --- a/.github/workflows/dockers-ci-container-image.yml +++ /dev/null @@ -1,54 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Build docker image: ci-container" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-ci-container-image.yml" - - "dockers/ci/**" - - "Makefile" - - "Makefile.d/**" - - "versions/GO_VERSION" - - "versions/NGT_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-ci-container-image.yml" - - "dockers/ci/**" - - "Makefile" - - "Makefile.d/**" - - "versions/GO_VERSION" - - "versions/NGT_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: ci-container - platforms: linux/amd64 - secrets: inherit diff --git a/.github/workflows/dockers-dev-container-image.yml b/.github/workflows/dockers-dev-container-image.yml deleted file mode 100644 index 5d6543be6e..0000000000 --- a/.github/workflows/dockers-dev-container-image.yml +++ /dev/null @@ -1,49 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: dev-container" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-dev-container-image.yml" - - "dockers/dev/**" - - "Makefile" - - "Makefile.d/**" - - "versions/GO_VERSION" - - "versions/NGT_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-dev-container-image.yml" - - "dockers/dev/**" - - "Makefile" - - "Makefile.d/**" - - "versions/GO_VERSION" - - "versions/NGT_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: dev-container - platforms: linux/amd64 - secrets: inherit diff --git a/.github/workflows/dockers-discoverer-k8s-image.yaml b/.github/workflows/dockers-discoverer-k8s-image.yaml new file mode 100644 index 0000000000..048bebc47d --- /dev/null +++ b/.github/workflows/dockers-discoverer-k8s-image.yaml @@ -0,0 +1,262 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: discoverer-k8s" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-discoverer-k8s-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/discoverer/k8s/*.go + - dockers/discoverer/k8s/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/metrics/node/*.go + - internal/k8s/metrics/pod/*.go + - internal/k8s/node/*.go + - internal/k8s/pod/*.go + - internal/k8s/service/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - 
internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/discoverer/k8s/config/*.go + - pkg/discoverer/k8s/handler/grpc/*.go + - pkg/discoverer/k8s/handler/rest/*.go + - pkg/discoverer/k8s/router/*.go + - pkg/discoverer/k8s/service/*.go + - pkg/discoverer/k8s/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-discoverer-k8s-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/discoverer/k8s/*.go + - dockers/discoverer/k8s/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/metrics/node/*.go + - internal/k8s/metrics/pod/*.go + - internal/k8s/node/*.go + - internal/k8s/pod/*.go + - internal/k8s/service/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go 
+ - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/discoverer/k8s/config/*.go + - pkg/discoverer/k8s/handler/grpc/*.go + - pkg/discoverer/k8s/handler/rest/*.go + - pkg/discoverer/k8s/router/*.go + - pkg/discoverer/k8s/service/*.go + - pkg/discoverer/k8s/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: discoverer-k8s + secrets: inherit diff --git a/.github/workflows/dockers-discoverer-k8s-image.yml b/.github/workflows/dockers-discoverer-k8s-image.yml deleted file mode 100644 index 5c735147ef..0000000000 --- a/.github/workflows/dockers-discoverer-k8s-image.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: discoverer-k8s" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-discoverer-k8s-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/discoverer/k8s/**" - - "cmd/discoverer/k8s/**" - - "dockers/discoverer/k8s/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-discoverer-k8s-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "apis/grpc/**" - - "pkg/discoverer/k8s/**" - - "cmd/discoverer/k8s/**" - - "dockers/discoverer/k8s/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: discoverer-k8s - secrets: inherit diff --git a/.github/workflows/dockers-example-client-image.yaml b/.github/workflows/dockers-example-client-image.yaml new file mode 100644 index 0000000000..b529a4bc25 --- /dev/null +++ b/.github/workflows/dockers-example-client-image.yaml @@ -0,0 +1,68 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: example-client" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-example-client-image.yaml + - Makefile + - Makefile.d/** + - apis/proto/** + - dockers/example/client/Dockerfile + - example/client/** + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-example-client-image.yaml + - Makefile + - Makefile.d/** + - apis/proto/** + - dockers/example/client/Dockerfile + - example/client/** + - go.mod + - go.sum + - hack/docker/gen/main.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: example-client + secrets: inherit diff --git a/.github/workflows/dockers-gateway-filter-image.yaml b/.github/workflows/dockers-gateway-filter-image.yaml new file mode 100644 index 0000000000..cc361b2435 --- /dev/null +++ b/.github/workflows/dockers-gateway-filter-image.yaml @@ -0,0 +1,262 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: gateway-filter" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-filter-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/filter/egress/*.go + - apis/grpc/v1/filter/ingress/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/filter/*.go + - dockers/gateway/filter/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/filter/egress/*.go + - internal/client/v1/client/filter/ingress/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - 
internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/filter/config/*.go + - pkg/gateway/filter/handler/grpc/*.go + - pkg/gateway/filter/handler/rest/*.go + - pkg/gateway/filter/router/*.go + - pkg/gateway/filter/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-filter-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/filter/egress/*.go + - apis/grpc/v1/filter/ingress/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/filter/*.go + - dockers/gateway/filter/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/filter/egress/*.go + - internal/client/v1/client/filter/ingress/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go 
+ - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/filter/config/*.go + - pkg/gateway/filter/handler/grpc/*.go + - pkg/gateway/filter/handler/rest/*.go + - pkg/gateway/filter/router/*.go + - pkg/gateway/filter/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: gateway-filter + secrets: inherit diff --git a/.github/workflows/dockers-gateway-filter-image.yml b/.github/workflows/dockers-gateway-filter-image.yml deleted file mode 100644 index 05b2e9e23b..0000000000 --- a/.github/workflows/dockers-gateway-filter-image.yml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: gateway-filter" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-gateway-filter-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/filter/**" - - "cmd/gateway/filter/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/filter/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-gateway-filter-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/filter/**" - - "cmd/gateway/filter/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/filter/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: gateway-filter - secrets: inherit diff --git a/.github/workflows/dockers-gateway-lb-image.yaml b/.github/workflows/dockers-gateway-lb-image.yaml new file mode 100644 index 0000000000..9a28c196e0 --- /dev/null +++ b/.github/workflows/dockers-gateway-lb-image.yaml @@ -0,0 +1,260 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: gateway-lb" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-lb-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/lb/*.go + - dockers/gateway/lb/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - 
internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/lb/config/*.go + - pkg/gateway/lb/handler/grpc/*.go + - pkg/gateway/lb/handler/rest/*.go + - pkg/gateway/lb/router/*.go + - pkg/gateway/lb/service/*.go + - pkg/gateway/lb/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-lb-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/lb/*.go + - dockers/gateway/lb/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/core/algorithm/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - 
internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/lb/config/*.go + - pkg/gateway/lb/handler/grpc/*.go + - pkg/gateway/lb/handler/rest/*.go + - pkg/gateway/lb/router/*.go + - pkg/gateway/lb/service/*.go + - pkg/gateway/lb/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: gateway-lb + secrets: inherit diff --git a/.github/workflows/dockers-gateway-lb-image.yml b/.github/workflows/dockers-gateway-lb-image.yml deleted file mode 100644 index c3f31fbdc7..0000000000 --- a/.github/workflows/dockers-gateway-lb-image.yml +++ /dev/null @@ -1,69 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: gateway-lb" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-gateway-lb-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/lb/**" - - "cmd/gateway/lb/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/lb/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-gateway-lb-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/lb/**" - - "cmd/gateway/lb/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/lb/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: gateway-lb - secrets: inherit diff --git a/.github/workflows/dockers-gateway-mirror-image.yaml b/.github/workflows/dockers-gateway-mirror-image.yaml index d293386b01..4f6e0c485c 100644 --- a/.github/workflows/dockers-gateway-mirror-image.yaml +++ b/.github/workflows/dockers-gateway-mirror-image.yaml @@ -13,50 +13,249 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + name: "Build docker image: gateway-mirror" on: push: branches: - main + - release/v*.* + - "!release/v*.*.*" tags: - "*.*.*" - - "v*.*.*" - "*.*.*-*" - - "v*.*.*-*" + - v*.*.* + - v*.*.*-* pull_request: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/dockers-gateway-mirror-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/mirror/**" - - "cmd/gateway/mirror/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/mirror/Dockerfile" - - "versions/GO_VERSION" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-mirror-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/mirror/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/mirror/*.go + - dockers/gateway/mirror/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/mirror/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/hash/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/vald/mirror/api/v1/*.go + - internal/k8s/vald/mirror/target/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/gateway/mirror/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - 
internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/mirror/config/*.go + - pkg/gateway/mirror/handler/grpc/*.go + - pkg/gateway/mirror/handler/rest/*.go + - pkg/gateway/mirror/router/*.go + - pkg/gateway/mirror/service/*.go + - pkg/gateway/mirror/usecase/*.go + - versions/GO_VERSION pull_request_target: paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/dockers-gateway-mirror-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/**/*_mock.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/gateway/mirror/**" - - "cmd/gateway/nirror/**" - - "pkg/gateway/internal/**" - - "dockers/gateway/mirror/Dockerfile" - - "versions/GO_VERSION" + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-gateway-mirror-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/mirror/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/gateway/mirror/*.go + - dockers/gateway/mirror/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/mirror/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/hash/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/vald/mirror/api/v1/*.go + - internal/k8s/vald/mirror/target/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - 
internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/gateway/mirror/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/gateway/mirror/config/*.go + - pkg/gateway/mirror/handler/grpc/*.go + - pkg/gateway/mirror/handler/rest/*.go + - pkg/gateway/mirror/router/*.go + - pkg/gateway/mirror/service/*.go + - pkg/gateway/mirror/usecase/*.go + - versions/GO_VERSION jobs: build: uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/dockers-helm-operator-image.yaml b/.github/workflows/dockers-helm-operator-image.yaml new file mode 100644 index 0000000000..9b62139dbe --- /dev/null +++ b/.github/workflows/dockers-helm-operator-image.yaml @@ -0,0 +1,68 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: helm-operator" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-helm-operator-image.yaml + - Makefile + - Makefile.d/** + - charts/vald-helm-operator/Chart.yaml + - charts/vald-helm-operator/templates/** + - charts/vald-helm-operator/values.yaml + - charts/vald/Chart.yaml + - charts/vald/templates/** + - charts/vald/values.yaml + - dockers/operator/helm/Dockerfile + - hack/docker/gen/main.go + - versions/OPERATOR_SDK_VERSION + pull_request_target: + paths: + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-helm-operator-image.yaml + - Makefile + - Makefile.d/** + - charts/vald-helm-operator/Chart.yaml + - charts/vald-helm-operator/templates/** + - charts/vald-helm-operator/values.yaml + - charts/vald/Chart.yaml + - charts/vald/templates/** + - charts/vald/values.yaml + - dockers/operator/helm/Dockerfile + - hack/docker/gen/main.go + - versions/OPERATOR_SDK_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: helm-operator + secrets: inherit diff --git a/.github/workflows/dockers-helm-operator-image.yml b/.github/workflows/dockers-helm-operator-image.yml deleted file mode 100644 index 4b0f6cde12..0000000000 --- a/.github/workflows/dockers-helm-operator-image.yml +++ /dev/null @@ -1,59 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Build docker image: helm-operator" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-helm-operator-image.yml" - - "dockers/operator/helm/Dockerfile" - - "charts/vald/Chart.yaml" - - "charts/vald/values.yaml" - - "charts/vald/templates/**" - - "charts/vald-helm-operator/Chart.yaml" - - "charts/vald-helm-operator/values.yaml" - - "charts/vald-helm-operator/templates/**" - - "versions/OPERATOR_SDK_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-helm-operator-image.yml" - - "dockers/operator/helm/Dockerfile" - - "charts/vald/Chart.yaml" - - "charts/vald/values.yaml" - - "charts/vald/templates/**" - - "charts/vald-helm-operator/Chart.yaml" - - "charts/vald-helm-operator/values.yaml" - - "charts/vald-helm-operator/templates/**" - - "versions/OPERATOR_SDK_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: operator/helm - secrets: inherit diff --git a/.github/workflows/dockers-image-scan.yml b/.github/workflows/dockers-image-scan.yml deleted file mode 100644 index 7d68caa4e9..0000000000 --- a/.github/workflows/dockers-image-scan.yml +++ /dev/null @@ -1,92 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Docker image scanning" -on: - schedule: - - cron: "0 1 * * *" -concurrency: - group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref != 'refs/heads/main' && github.ref || github.sha }}-${{ github.event_name }} - cancel-in-progress: true -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/dump-context - agent-ngt: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: agent-ngt - agent-faiss: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: agent-faiss - agent-sidecar: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: agent-sidecar - ci-container: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: ci-container - dev-container: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: dev-container - discoverer-k8s: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: discoverer-k8s - gateway-lb: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: gateway-lb - gateway-mirror: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: gateway-mirror - gateway-filter: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: gateway-filter - index-correction: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: index-correction - index-creation: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: index-creation - index-save: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: index-save - loadtest: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: loadtest - manager-index: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: manager-index - operator-helm: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: operator/helm - readreplica-rotate: - uses: ./.github/workflows/_docker-image-scan.yaml - with: - target: readreplica-rotate diff --git a/.github/workflows/dockers-index-correction-image.yaml b/.github/workflows/dockers-index-correction-image.yaml new file mode 100644 index 0000000000..f8f65b969f --- /dev/null +++ b/.github/workflows/dockers-index-correction-image.yaml @@ -0,0 +1,252 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: index-correction" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-correction-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/index/job/correction/*.go + - dockers/index/job/correction/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/pogreb/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/index/job/correction/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - 
internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/correction/config/*.go + - pkg/index/job/correction/service/*.go + - pkg/index/job/correction/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-correction-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/index/job/correction/*.go + - dockers/index/job/correction/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/pogreb/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/index/job/correction/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - 
internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/correction/config/*.go + - pkg/index/job/correction/service/*.go + - pkg/index/job/correction/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: index-correction + secrets: inherit diff --git a/.github/workflows/dockers-index-correction.yml b/.github/workflows/dockers-index-correction.yml deleted file mode 100644 index 850cf20320..0000000000 --- a/.github/workflows/dockers-index-correction.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: index-correction" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-correction.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/correction/**" - - "cmd/index/job/correction/**" - - "dockers/index/job/correction/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-correction.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/correction/**" - - "cmd/index/job/correction/**" - - "dockers/index/job/correction/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: index-correction - secrets: inherit diff --git a/.github/workflows/dockers-index-creation-image.yaml b/.github/workflows/dockers-index-creation-image.yaml new file mode 100644 index 0000000000..0436b46a0d --- /dev/null +++ b/.github/workflows/dockers-index-creation-image.yaml @@ -0,0 +1,246 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: index-creation" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-creation-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/creation/*.go + - dockers/index/job/creation/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - 
pkg/index/job/creation/config/*.go + - pkg/index/job/creation/service/*.go + - pkg/index/job/creation/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-creation-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/creation/*.go + - dockers/index/job/creation/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/creation/config/*.go + - pkg/index/job/creation/service/*.go + - pkg/index/job/creation/usecase/*.go + - versions/GO_VERSION +jobs: + build: 
+ uses: ./.github/workflows/_docker-image.yaml + with: + target: index-creation + secrets: inherit diff --git a/.github/workflows/dockers-index-creation.yml b/.github/workflows/dockers-index-creation.yml deleted file mode 100644 index d2ef7c6186..0000000000 --- a/.github/workflows/dockers-index-creation.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: index-creation" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-creation.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/creation/**" - - "cmd/index/job/creation/**" - - "dockers/index/job/creation/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-creation.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/creation/**" - - "cmd/index/job/creation/**" - - "dockers/index/job/creation/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: index-creation - secrets: inherit diff --git a/.github/workflows/dockers-index-deletion-image.yaml b/.github/workflows/dockers-index-deletion-image.yaml new file mode 100644 index 0000000000..653763aad7 --- /dev/null +++ b/.github/workflows/dockers-index-deletion-image.yaml @@ -0,0 +1,246 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: index-deletion" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-deletion-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/index/job/deletion/*.go + - dockers/index/job/deletion/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - 
pkg/index/job/deletion/config/*.go + - pkg/index/job/deletion/service/*.go + - pkg/index/job/deletion/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-deletion-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/index/job/deletion/*.go + - dockers/index/job/deletion/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/deletion/config/*.go + - pkg/index/job/deletion/service/*.go + - pkg/index/job/deletion/usecase/*.go + - versions/GO_VERSION +jobs: + build: + 
uses: ./.github/workflows/_docker-image.yaml + with: + target: index-deletion + secrets: inherit diff --git a/.github/workflows/dockers-index-operator-image.yaml b/.github/workflows/dockers-index-operator-image.yaml new file mode 100644 index 0000000000..6e33ad364a --- /dev/null +++ b/.github/workflows/dockers-index-operator-image.yaml @@ -0,0 +1,244 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: index-operator" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-operator-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/operator/*.go + - dockers/index/operator/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/v2/pod/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - 
internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/operator/config/*.go + - pkg/index/operator/service/*.go + - pkg/index/operator/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-operator-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/operator/*.go + - dockers/index/operator/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/v2/pod/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - 
internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/operator/config/*.go + - pkg/index/operator/service/*.go + - pkg/index/operator/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: index-operator + secrets: inherit diff --git a/.github/workflows/dockers-index-operator.yml b/.github/workflows/dockers-index-operator.yml deleted file mode 100644 index e6ea6737ad..0000000000 --- a/.github/workflows/dockers-index-operator.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: index-operator" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-operator.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/operator/**" - - "cmd/index/operator/**" - - "dockers/index/operator/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-operator.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/operator/**" - - "cmd/index/operator/**" - - "dockers/index/operator/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: index-operator - secrets: inherit diff --git a/.github/workflows/dockers-index-save-image.yaml b/.github/workflows/dockers-index-save-image.yaml new file mode 100644 index 0000000000..4506c14dc3 --- /dev/null +++ b/.github/workflows/dockers-index-save-image.yaml @@ -0,0 +1,246 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: index-save" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-save-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/save/*.go + - dockers/index/job/save/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - 
internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/save/config/*.go + - pkg/index/job/save/service/*.go + - pkg/index/job/save/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-index-save-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/save/*.go + - dockers/index/job/save/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/discoverer/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - 
internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/save/config/*.go + - pkg/index/job/save/service/*.go + - pkg/index/job/save/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: index-save + secrets: inherit diff --git a/.github/workflows/dockers-index-save.yml b/.github/workflows/dockers-index-save.yml deleted file mode 100644 index 87fb3fa021..0000000000 --- a/.github/workflows/dockers-index-save.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: index-save" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-save.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/save/**" - - "cmd/index/job/save/**" - - "dockers/index/job/save/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-index-save.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/save/**" - - "cmd/index/job/save/**" - - "dockers/index/job/save/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: index-save - secrets: inherit diff --git a/.github/workflows/dockers-loadtest-image.yaml b/.github/workflows/dockers-loadtest-image.yaml new file mode 100644 index 0000000000..6d74532066 --- /dev/null +++ b/.github/workflows/dockers-loadtest-image.yaml @@ -0,0 +1,217 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: loadtest" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-loadtest-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/tools/cli/loadtest/*.go + - dockers/tools/cli/loadtest/Dockerfile + - go.mod + - go.sum + - hack/benchmark/assets/x1b/*.go + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/rest/*.go + - internal/net/quic/*.go + - internal/observability/attribute/*.go + - internal/observability/metrics/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/server/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/cli/loadtest/assets/*.go + - pkg/tools/cli/loadtest/config/*.go + - pkg/tools/cli/loadtest/service/*.go + - pkg/tools/cli/loadtest/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-loadtest-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/tools/cli/loadtest/*.go + - dockers/tools/cli/loadtest/Dockerfile + - go.mod + - go.sum + - hack/benchmark/assets/x1b/*.go + - 
hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/rest/*.go + - internal/net/quic/*.go + - internal/observability/attribute/*.go + - internal/observability/metrics/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/server/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/tools/cli/loadtest/assets/*.go + - pkg/tools/cli/loadtest/config/*.go + - pkg/tools/cli/loadtest/service/*.go + - pkg/tools/cli/loadtest/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: loadtest + platforms: linux/amd64 + secrets: inherit diff --git a/.github/workflows/dockers-loadtest-image.yml b/.github/workflows/dockers-loadtest-image.yml deleted file mode 100644 index 810ee9dbab..0000000000 --- a/.github/workflows/dockers-loadtest-image.yml +++ /dev/null @@ -1,66 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: "Build docker image: loadtest" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-loadtest-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/tools/cli/loadtest/**" - - "cmd/tools/cli/loadtest/**" - - "dockers/tools/cli/loadtest/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-loadtest-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/tools/cli/loadtest/**" - - "cmd/tools/cli/loadtest/**" - - "dockers/tools/cli/loadtest/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: loadtest - platforms: linux/amd64 - secrets: inherit diff --git a/.github/workflows/dockers-manager-index-image.yaml b/.github/workflows/dockers-manager-index-image.yaml new file mode 100644 index 0000000000..580fbfbbcd --- /dev/null +++ b/.github/workflows/dockers-manager-index-image.yaml @@ -0,0 +1,266 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: manager-index" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-manager-index-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/manager/index/*.go + - dockers/manager/index/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/*.go + - internal/client/v1/client/agent/core/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/manager/index/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - 
internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/manager/index/config/*.go + - pkg/manager/index/handler/grpc/*.go + - pkg/manager/index/handler/rest/*.go + - pkg/manager/index/router/*.go + - pkg/manager/index/service/*.go + - pkg/manager/index/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-manager-index-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/agent/core/*.go + - apis/grpc/v1/discoverer/*.go + - apis/grpc/v1/payload/*.go + - apis/grpc/v1/rpc/errdetails/*.go + - apis/grpc/v1/vald/*.go + - apis/proto/** + - cmd/manager/index/*.go + - dockers/manager/index/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/client/v1/client/*.go + - internal/client/v1/client/agent/core/*.go + - internal/client/v1/client/discoverer/*.go + - internal/client/v1/client/vald/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/backoff/*.go + - internal/observability/metrics/circuitbreaker/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/manager/index/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - 
internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/manager/index/config/*.go + - pkg/manager/index/handler/grpc/*.go + - pkg/manager/index/handler/rest/*.go + - pkg/manager/index/router/*.go + - pkg/manager/index/service/*.go + - pkg/manager/index/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: manager-index + secrets: inherit diff --git a/.github/workflows/dockers-manager-index-image.yml b/.github/workflows/dockers-manager-index-image.yml deleted file mode 100644 index 1e2a6addb3..0000000000 --- a/.github/workflows/dockers-manager-index-image.yml +++ /dev/null @@ -1,65 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: manager-index" -on: - push: - branches: - - "main" - - "release/v*.*" - - "!release/v*.*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-manager-index-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/manager/index/**" - - "cmd/manager/index/**" - - "dockers/manager/index/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-manager-index-image.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "!internal/k8s/**" - - "apis/grpc/**" - - "pkg/manager/index/**" - - "cmd/manager/index/**" - - "dockers/manager/index/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: manager-index - secrets: inherit diff --git a/.github/workflows/dockers-readreplica-rotate-image.yaml b/.github/workflows/dockers-readreplica-rotate-image.yaml new file mode 100644 index 0000000000..ab1bc9b67e --- /dev/null +++ b/.github/workflows/dockers-readreplica-rotate-image.yaml @@ -0,0 +1,242 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go + +name: "Build docker image: readreplica-rotate" +on: + push: + branches: + - main + - release/v*.* + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - v*.*.* + - v*.*.*-* + pull_request: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-readreplica-rotate-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/readreplica/rotate/*.go + - dockers/index/job/readreplica/rotate/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - 
internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/readreplica/rotate/config/*.go + - pkg/index/job/readreplica/rotate/service/*.go + - pkg/index/job/readreplica/rotate/usecase/*.go + - versions/GO_VERSION + pull_request_target: + paths: + - "!**/*_mock.go" + - "!**/*_test.go" + - .github/actions/docker-build/action.yaml + - .github/workflows/_docker-image.yaml + - .github/workflows/dockers-readreplica-rotate-image.yaml + - Makefile + - Makefile.d/** + - apis/grpc/v1/rpc/errdetails/*.go + - apis/proto/** + - cmd/index/job/readreplica/rotate/*.go + - dockers/index/job/readreplica/rotate/Dockerfile + - go.mod + - go.sum + - hack/docker/gen/main.go + - internal/backoff/*.go + - internal/cache/*.go + - internal/cache/cacher/*.go + - internal/cache/gache/*.go + - internal/circuitbreaker/*.go + - internal/config/*.go + - internal/conv/*.go + - internal/db/kvs/redis/*.go + - internal/db/nosql/cassandra/*.go + - internal/db/rdb/mysql/*.go + - internal/db/rdb/mysql/dbr/*.go + - internal/encoding/json/*.go + - internal/errors/*.go + - internal/file/*.go + - internal/info/*.go + - internal/io/*.go + - internal/k8s/*.go + - internal/k8s/client/*.go + - internal/k8s/vald/*.go + - internal/log/*.go + - internal/log/format/*.go + - internal/log/glg/*.go + - internal/log/level/*.go + - internal/log/logger/*.go + - internal/log/nop/*.go + - internal/log/retry/*.go + - internal/log/zap/*.go + - internal/net/*.go + - internal/net/control/*.go + - internal/net/grpc/*.go + - internal/net/grpc/admin/*.go + - internal/net/grpc/codes/*.go + - internal/net/grpc/credentials/*.go + - internal/net/grpc/errdetails/*.go + - internal/net/grpc/health/*.go + - internal/net/grpc/interceptor/client/metric/*.go + - internal/net/grpc/interceptor/client/trace/*.go + - internal/net/grpc/interceptor/server/logging/*.go + - internal/net/grpc/interceptor/server/metric/*.go + - internal/net/grpc/interceptor/server/recover/*.go + - internal/net/grpc/interceptor/server/trace/*.go + - internal/net/grpc/keepalive/*.go + - internal/net/grpc/logger/*.go + - internal/net/grpc/pool/*.go + - internal/net/grpc/proto/*.go + - internal/net/grpc/reflection/*.go + - internal/net/grpc/status/*.go + - internal/net/grpc/types/*.go + - internal/net/http/dump/*.go + - internal/net/http/json/*.go + - internal/net/http/metrics/*.go + - internal/net/http/middleware/*.go + - internal/net/http/rest/*.go + - internal/net/http/routing/*.go + - internal/net/quic/*.go + - internal/observability/*.go + - internal/observability/attribute/*.go + - internal/observability/exporter/*.go + - internal/observability/exporter/otlp/*.go + - internal/observability/metrics/*.go + - internal/observability/metrics/grpc/*.go + - internal/observability/metrics/mem/*.go + - internal/observability/metrics/runtime/cgo/*.go + - internal/observability/metrics/runtime/goroutine/*.go + - internal/observability/metrics/version/*.go + - internal/observability/trace/*.go + - internal/os/*.go + - internal/params/*.go + - internal/rand/*.go + - internal/runner/*.go + - internal/safety/*.go + - internal/servers/*.go + - internal/servers/server/*.go + - internal/servers/starter/*.go + - internal/strings/*.go + - internal/sync/*.go + - internal/sync/errgroup/*.go + - 
internal/sync/semaphore/*.go + - internal/sync/singleflight/*.go + - internal/timeutil/*.go + - internal/timeutil/location/*.go + - internal/tls/*.go + - internal/version/*.go + - pkg/index/job/readreplica/rotate/config/*.go + - pkg/index/job/readreplica/rotate/service/*.go + - pkg/index/job/readreplica/rotate/usecase/*.go + - versions/GO_VERSION +jobs: + build: + uses: ./.github/workflows/_docker-image.yaml + with: + target: readreplica-rotate + secrets: inherit diff --git a/.github/workflows/dockers-readreplica-rotate.yml b/.github/workflows/dockers-readreplica-rotate.yml deleted file mode 100644 index 0a46f0b2c4..0000000000 --- a/.github/workflows/dockers-readreplica-rotate.yml +++ /dev/null @@ -1,63 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Build docker image: readreplica-rotate" -on: - push: - branches: - - main - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-readreplica-rotate.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/readreplica/rotate/**" - - "cmd/index/job/readreplica/rotate/**" - - "dockers/index/job/readreplica/rotate/Dockerfile" - - "versions/GO_VERSION" - pull_request_target: - paths: - - ".github/actions/docker-build/action.yaml" - - ".github/workflows/_docker-image.yaml" - - ".github/workflows/dockers-readreplica-rotate.yml" - - "go.mod" - - "go.sum" - - "internal/**" - - "!internal/**/*_test.go" - - "!internal/db/**" - - "internal/k8s/**" - - "apis/grpc/**" - - "pkg/index/job/readreplica/rotate/**" - - "cmd/index/job/readreplica/rotate/**" - - "dockers/index/job/readreplica/rotate/Dockerfile" - - "versions/GO_VERSION" -jobs: - build: - uses: ./.github/workflows/_docker-image.yaml - with: - target: readreplica-rotate - secrets: inherit diff --git a/.github/workflows/dockers-release-branch-image.yaml b/.github/workflows/dockers-release-branch-image.yaml index 9ec53aa4fe..06e8d80b8e 100644 --- a/.github/workflows/dockers-release-branch-image.yaml +++ b/.github/workflows/dockers-release-branch-image.yaml @@ -88,6 +88,12 @@ jobs: with: target: index-creation secrets: inherit + index-deletion: + needs: [dump-contexts-to-log] + uses: ./.github/workflows/_docker-image.yaml + with: + target: index-deletion + secrets: inherit index-save: needs: [dump-contexts-to-log] uses: ./.github/workflows/_docker-image.yaml diff --git a/.github/workflows/e2e-chaos.yaml b/.github/workflows/e2e-chaos.yaml index 20a846b582..fe91004ccd 100644 --- a/.github/workflows/e2e-chaos.yaml +++ b/.github/workflows/e2e-chaos.yaml @@ -36,7 +36,7 @@ jobs: - uses: ./.github/actions/dump-context detect-ci-container: if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-chaos' - uses: 
./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml agent-failure: name: "E2E chaos test (Agent failure: to test insert/search works even if one of the agents is failing)" needs: [detect-ci-container] diff --git a/.github/workflows/e2e-code-bench-agent.yaml b/.github/workflows/e2e-code-bench-agent.yaml index 4358341065..9d11e71b6a 100644 --- a/.github/workflows/e2e-code-bench-agent.yaml +++ b/.github/workflows/e2e-code-bench-agent.yaml @@ -19,7 +19,7 @@ on: branches: - main paths: - - ".github/workflows/e2e-bench-agent.yml" + - ".github/workflows/e2e-bench-agent.yaml" - "internal/core/**" - "internal/client/**" - "internal/net/**" @@ -31,7 +31,7 @@ on: - "versions/NGT_VERSION" pull_request: paths: - - ".github/workflows/e2e-bench-agent.yml" + - ".github/workflows/e2e-bench-agent.yaml" - "internal/core/**" - "internal/client/**" - "internal/net/**" @@ -51,7 +51,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml grpc-sequential: name: grpc-sequential runs-on: ubuntu-latest @@ -64,7 +64,6 @@ jobs: - name: Set Git config run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: ./.github/actions/setup-go - name: Run grpc-sequential run: | make hack/benchmark/assets/dataset/${{ env.DATASET }} @@ -92,7 +91,6 @@ jobs: - name: Set Git config run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: ./.github/actions/setup-go - name: Run grpc-stream run: | make hack/benchmark/assets/dataset/${{ env.DATASET }} diff --git a/.github/workflows/e2e-max-dim.yml b/.github/workflows/e2e-max-dim.yml deleted file mode 100644 index 1d03534703..0000000000 --- a/.github/workflows/e2e-max-dim.yml +++ /dev/null @@ -1,122 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# - -name: "Run E2E Max Dimension test" -on: - push: - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - types: - - "labeled" -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/dump-context - detect-ci-container: - if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-max-dim' - uses: ./.github/workflows/_detect-ci-container.yml - e2e-max-dimension-insert: - name: "E2E test (Max Dimension Insert: skip strict exist check)" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Run E2E MaxDimension - run: | - go get github.com/vdaas/vald-client-go/v1/payload - go get github.com/vdaas/vald-client-go/v1/vald - BIT=18 - RELEASE=`helm list | grep vald | awk '{print $1}'` - while [ $BIT -ne 32 ]; do - echo "Start: Bit=${BIT}" - FILE="tmp.log" - touch ${FILE} - FILEPATH=`readlink -f ${FILE}` - DIM=$((1<<$BIT)) - if [ $BIT = "32" ]; then - DIM=$(($DIM-1)) - fi - - make k8s/vald/deploy HELM_VALUES=${VALUES} HELM_EXTRA_OPTIONS="--set agent.ngt.dimension=${DIM} ${HELM_EXTRA_OPTIONS}" - - sleep 3 - - kubectl rollout restart statefulset vald-agent - - sleep 30 - - kubectl wait --for=condition=Ready pod -l ${WAIT_FOR_SELECTOR} --timeout=${WAIT_FOR_TIMEOUT} - POD_NAME=`kubectl get pods | grep vald-lb-gateway | awk '{print $1}'` - make E2E_BIND_PORT=8081 \ - E2E_MAX_DIM_RESULT_FILEPATH=${FILEPATH} \ - E2E_MAX_DIM_BIT=${BIT} \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e/maxdim - CODE=`sed -n 1P ${FILEPATH}` - if [ ${CODE} = "ResourceExhausted" ]; then - echo "Finish: Bit=${BIT} with ${CODE}" - BIT=$(($BIT-1)) - rm ${FILEPATH} - break; - fi - if [ ${CODE} != "OK" ]; then - echo "Finish: Bit=${BIT} with Error: ${CODE}" - rm ${FILEPATH} - break; - fi - echo "Finish: Bit=${BIT}" - BIT=$(($BIT+1)) - rm ${FILEPATH} - - echo "removing cluster" - make k8s/vald/delete HELM_VALUES=${VALUES} HELM_EXTRA_OPTIONS="${HELM_EXTRA_OPTIONS}" - done - echo "MAX_BIT=${BIT}" >> $GITHUB_OUTPUT - echo "MAX_BIT=${BIT}" - env: - HELM_EXTRA_OPTIONS: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - WAIT_FOR_SELECTOR: app=vald-agent - WAIT_FOR_TIMEOUT: 29m - VALUES: .github/helm/values/values-max-dim.yaml - timeout-minutes: 60 - slack-notification: - name: "Slack notification" - needs: [e2e-max-dimension-insert] - runs-on: ubuntu-latest - if: startsWith( github.ref, 'refs/tags/') - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/notify-slack - with: - author_name: "E2E max dim test" - slack_notify_webhook_url: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }} diff --git a/.github/workflows/e2e-profiling.yml b/.github/workflows/e2e-profiling.yml deleted file mode 100644 index 3205c3f573..0000000000 --- a/.github/workflows/e2e-profiling.yml +++ /dev/null @@ -1,196 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Run E2E profiling" -on: - push: - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - types: - - "labeled" -env: - DATASET: fashion-mnist-784-euclidean.hdf5 -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/dump-context - detect-ci-container: - if: startsWith(github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-profiling' - uses: ./.github/workflows/_detect-ci-container.yml - e2e-profiling: - name: "E2E profiling" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-profile.yaml - wait_for_selector: app=vald-lb-gateway - - name: Deploy profefe - run: | - make k8s/metrics/profefe/deploy - kubectl patch cronjob kprofefe -p '{"spec": {"schedule": "*/1 * * * *"}}' - kubectl wait --for=condition=ready pod -l app=profefe --timeout=300s - - name: Run E2E CRUD - continue-on-error: true - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10000 \ - E2E_SEARCH_COUNT=100 \ - E2E_SEARCH_BY_ID_COUNT=100 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - - name: Get profiles - run: | - mkdir -p profiles - kubectl port-forward deployment/profefe 10100:10100 & - sleep 3 - for svc in vald-agent-ngt vald-lb-gateway vald-discoverer vald-manager-index - do - for t in heap cpu goroutine threadcreate - do - curl "http://localhost:10100/api/0/profiles/merge?service=${svc}&type=${t}&from=2021-07-01T00:00:00&to=2030-07-01T00:00:00" \ - --output profiles/${svc}-${t}.pb - done - done - - uses: actions/upload-artifact@v4 - with: - name: profiles - path: profiles/* - - uses: actions/cache@v4 - id: cache - with: - path: ./profiles-main - key: ${{ runner.os }}-profiles-main-${{ github.sha }} - restore-keys: ${{ runner.os }}-profiles-main- - - name: Update cache - if: startsWith(github.ref, 'refs/tags/') - run: | - mkdir -p profiles-main - cp -f profiles/* profiles-main/ - cp -f versions/VALD_VERSION profiles-main/ - - name: Generate graphs - shell: bash - run: | - tag="unknown" - if [[ "${{ github.event_name }}" == "pull_request" ]]; then - tag="pr-${{ github.event.pull_request.number }}" - elif [[ "${{ github.ref }}" == 
"refs/heads/main" ]]; then - tag="main-${{ github.sha }}" - elif [[ "${{ github.ref }}" == "refs/heads/release/*" ]]; then - version=$(echo ${{ github.ref }} | sed 's/refs\/heads\/release\///') - tag="v${version}-${{ github.sha }}" - elif [[ -f profiles-main/VALD_VERSION && -s profiles-main/VALD_VERSION ]]; then - tag=$(cat profiles-main/VALD_VERSION) - fi - - mkdir -p graphs - for svc in vald-agent-ngt vald-lb-gateway vald-discoverer vald-manager-index - do - for t in heap cpu goroutine threadcreate - do - echo "generating: graphs/${svc}-${t}-${GITHUB_SHA::6}.svg" - go tool pprof --svg profiles/${svc}-${t}.pb > graphs/${svc}-${t}-${GITHUB_SHA::6}.svg - echo "generating: graphs/${svc}-${t}-${GITHUB_SHA::6}.png" - go tool pprof --png profiles/${svc}-${t}.pb > graphs/${svc}-${t}-${GITHUB_SHA::6}.png - if [ -f profiles-main/${svc}-${t}.pb ]; then - echo "generating: graphs/${svc}-${t}-main-${GITHUB_SHA::6}.svg" - go tool pprof --svg -base=profiles-main/${svc}-${t}.pb profiles/${svc}-${t}.pb > graphs/${svc}-${t}-diff-${tag}-${GITHUB_SHA::6}.svg - echo "generating: graphs/${svc}-${t}-main-${GITHUB_SHA::6}.png" - go tool pprof --png -base=profiles-main/${svc}-${t}.pb profiles/${svc}-${t}.pb > graphs/${svc}-${t}-diff-${tag}-${GITHUB_SHA::6}.png - else - echo "skipping: profiles-main/${svc}-${t}.pb does not exist." - fi - done - done - - uses: actions/upload-artifact@v4 - with: - name: graphs - path: graphs/* - - name: Upload to vald-ci-images repository - if: github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-profiling' - run: | - CLONE_DIR=$(mktemp -d) - git clone --depth 1 https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/vdaas-ci/vald-ci-images.git $CLONE_DIR - cp -r graphs $CLONE_DIR/${GITHUB_SHA::6} - cd $CLONE_DIR - git config user.email "vald@vdaas.org" - git config user.name "vdaas-ci" - git add . - git commit -m ":robot: Add ${GITHUB_SHA::6}" - git push https://${GITHUB_USER}:${GITHUB_TOKEN}@github.com/vdaas-ci/vald-ci-images.git --set-upstream main - env: - GITHUB_USER: ${{ secrets.DISPATCH_USER }} - GITHUB_TOKEN: ${{ secrets.DISPATCH_TOKEN }} - - name: Comment - if: github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-profiling' - run: | - base="https://raw.githubusercontent.com/vdaas-ci/vald-ci-images/main/${GITHUB_SHA::6}" - body="" - for svc in vald-agent-ngt vald-lb-gateway vald-discoverer vald-manager-index - do - body="$body" - done - body="$body" - for t in cpu heap - do - body="$body" - for svc in vald-agent-ngt vald-lb-gateway vald-discoverer vald-manager-index - do - body="$body" - done - body="$body" - done - body="$body
" - curl --include --verbose --fail \ - -H "Accept: application/json" \ - -H "Content-Type:application/json" \ - -H "Authorization: token ${GITHUB_TOKEN}" \ - --request POST \ - --data "{\"body\": \"# Profile Report\n$body\nother images\"}" \ - $API_URL - env: - GITHUB_TOKEN: ${{ secrets.DISPATCH_TOKEN }} - API_URL: ${{ github.event.pull_request.comments_url }} diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml deleted file mode 100644 index 65fff8b9b5..0000000000 --- a/.github/workflows/e2e.yml +++ /dev/null @@ -1,426 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Run E2E deploy and integration test" -on: - push: - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - pull_request: - types: - - "labeled" -env: - DATASET: fashion-mnist-784-euclidean.hdf5 -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/dump-context - detect-ci-container: - if: startsWith( github.ref, 'refs/tags/') || github.event.action == 'labeled' && github.event.label.name == 'actions/e2e-deploy' - uses: ./.github/workflows/_detect-ci-container.yml - e2e-stream-crud: - name: "E2E test (Stream CRUD)" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-lb.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10000\ - E2E_SEARCH_COUNT=10000 \ - E2E_SEARCH_BY_ID_COUNT=10000 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-stream-crud-for-operator: - name: "E2E test (Stream CRUD) for operator" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Create Helm 
options for vald-helm-operator - uses: ./.github/actions/setup-e2e - id: vald_helm_operator - with: - target_images: "vdaas/vald-helm-operator" - require_libhdf5: false - require_go: false - require_helm: false - require_k3d: false - - name: Merge Docker image tag - shell: bash - run: | - IMAGE_TAGS=(${{ steps.setup_e2e.outputs.IMAGE_TAGS }}) - - for IMAGE_TAG in "${IMAGE_TAGS[@]}" - do - echo "IMAGE_TAG:${IMAGE_TAG}" - - ARR=(${IMAGE_TAG//=/ }) - FIELD=${ARR[0]} - TAG="\"${ARR[1]}\"" - yq e ".spec.$FIELD=$TAG" -i ./.github/valdrelease/valdrelease.yaml - done - - name: Deploy Vald using Vald Helm Operator - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald-helm-operator - with: - helm_extra_options: ${{ steps.vald_helm_operator.outputs.HELM_EXTRA_OPTIONS }} - valdrelease: ./.github/valdrelease/valdrelease.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10000 \ - E2E_SEARCH_COUNT=10000 \ - E2E_SEARCH_BY_ID_COUNT=10000 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-stream-crud-under-index-management-jobs: - name: "E2E test (Stream CRUD) under index management jobs" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - with: - target_images: "vdaas/vald-agent-ngt vdaas/vald-discoverer-k8s vdaas/vald-lb-gateway vdaas/vald-index-creation vdaas/vald-index-save" - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - env: - HELM_EXTRA_OPTIONS: "--values .github/helm/values/values-index-management-jobs.yaml" - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} ${{ env.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-lb.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10000\ - E2E_SEARCH_COUNT=10000 \ - E2E_SEARCH_BY_ID_COUNT=10000 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-stream-crud-skip-exist-check: - name: "E2E test (Stream CRUD: skip strict exist check)" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: 
./.github/actions/setup-e2e - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-lb.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10 \ - E2E_SEARCH_COUNT=10 \ - E2E_SEARCH_BY_ID_COUNT=10 \ - E2E_GET_OBJECT_COUNT=10 \ - E2E_UPDATE_COUNT=10 \ - E2E_UPSERT_COUNT=10 \ - E2E_REMOVE_COUNT=10 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e/skip - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-multiapis-crud: - name: "E2E test (Multi-APIs CRUD)" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-lb.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=100 \ - E2E_SEARCH_COUNT=10 \ - E2E_SEARCH_BY_ID_COUNT=10 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=2m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e/multi - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-jobs: - name: "E2E test (Jobs)" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-lb.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E Jobs - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_INSERT_COUNT=10000\ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e/index/job/correction - env: - POD_NAME: ${{ steps.deploy_vald.outputs.POD_NAME }} - e2e-stream-crud-with-readreplica: - name: "E2E test (Stream CRUD) with read replica" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E 
environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - with: - require_k3d: "false" - require_minikube: "true" - - name: Deploy Vald - id: deploy_vald - uses: ./.github/actions/e2e-deploy-vald - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-readreplica.yaml - wait_for_selector: app=vald-agent - - name: Deploy Vald Read Replica - id: deploy_vald_readreplica - uses: ./.github/actions/e2e-deploy-vald-readreplica - with: - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-readreplica.yaml - wait_for_selector: app=vald-lb-gateway - - name: Run E2E CRUD with read replica rotation - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=1000 \ - E2E_SEARCH_COUNT=1000 \ - E2E_SEARCH_BY_ID_COUNT=1000 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=default \ - e2e/readreplica - env: - POD_NAME: ${{ steps.deploy_vald_readreplica.outputs.POD_NAME }} - e2e-stream-crud-with-mirror: - name: "E2E test (Stream CRUD) with mirror" - needs: [detect-ci-container] - runs-on: ubuntu-latest - timeout-minutes: 60 - container: - image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} - options: "--add-host host.docker.internal:host-gateway" - steps: - - uses: actions/checkout@v4 - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - name: Setup E2E environment - id: setup_e2e - uses: ./.github/actions/setup-e2e - with: - target_images: "vdaas/vald-agent-ngt vdaas/vald-discoverer-k8s vdaas/vald-lb-gateway vdaas/vald-manager-index vdaas/vald-mirror-gateway" - - name: Deploy Vald-01 - id: deploy_vald_01 - uses: ./.github/actions/e2e-deploy-vald - with: - namespace: vald-01 - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-mirror-01.yaml - wait_for_selector: app=vald-mirror-gateway - - name: Deploy Vald-02 - id: deploy_vald_02 - uses: ./.github/actions/e2e-deploy-vald - with: - namespace: vald-02 - helm_extra_options: ${{ steps.setup_e2e.outputs.HELM_EXTRA_OPTIONS }} - values: .github/helm/values/values-mirror-02.yaml - wait_for_selector: app=vald-mirror-gateway - - name: Deploy Mirror Target - run: | - kubectl apply -f .github/helm/values/vald-mirror-target.yaml -n vald-01 - sleep 5s - kubectl get pods -A && kubectl get vmt -o wide -A - - name: Run E2E CRUD - run: | - make hack/benchmark/assets/dataset/${{ env.DATASET }} - make E2E_BIND_PORT=8081 \ - E2E_DATASET_NAME=${{ env.DATASET }} \ - E2E_INSERT_COUNT=10000\ - E2E_SEARCH_COUNT=10000 \ - E2E_SEARCH_BY_ID_COUNT=10000 \ - E2E_GET_OBJECT_COUNT=100 \ - E2E_UPDATE_COUNT=100 \ - E2E_UPSERT_COUNT=100 \ - E2E_REMOVE_COUNT=100 \ - E2E_WAIT_FOR_CREATE_INDEX_DURATION=3m \ - E2E_TARGET_POD_NAME=${POD_NAME} \ - E2E_TARGET_NAMESPACE=vald-01 \ - e2e - env: - POD_NAME: ${{ steps.deploy_vald_01.outputs.POD_NAME }} - slack-notification: - name: "Slack notification" - if: startsWith( github.ref, 'refs/tags/') - needs: - - e2e-jobs - - e2e-multiapis-crud - - e2e-stream-crud - - e2e-stream-crud-for-operator - - e2e-stream-crud-skip-exist-check - - e2e-stream-crud-under-index-management-jobs - - e2e-stream-crud-with-mirror - - e2e-stream-crud-with-readreplica - 
runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: ./.github/actions/notify-slack - with: - author_name: "E2E deploy test" - slack_notify_webhook_url: ${{ secrets.SLACK_NOTIFY_WEBHOOK_URL }} diff --git a/.github/workflows/semver-major-minor.yaml b/.github/workflows/semver-major-minor.yaml index ec796d619e..d426fde5f5 100644 --- a/.github/workflows/semver-major-minor.yaml +++ b/.github/workflows/semver-major-minor.yaml @@ -95,7 +95,7 @@ jobs: if: ${{ needs.semver-auto.outputs.RELEASE == 'true' }} needs: - semver-auto - uses: ./.github/workflows/_release-pr.yml + uses: ./.github/workflows/_release-pr.yaml with: release_branch_name: ${{ needs.semver-auto.outputs.RELEASE_BRANCH_NAME }} release_tag: ${{ needs.semver-auto.outputs.RELEASE_TAG }} diff --git a/.github/workflows/semver-patch.yaml b/.github/workflows/semver-patch.yaml index fadeec2a40..ef8be183d5 100644 --- a/.github/workflows/semver-patch.yaml +++ b/.github/workflows/semver-patch.yaml @@ -71,7 +71,7 @@ jobs: if: ${{ needs.semver-auto.outputs.RELEASE == 'true' }} needs: - semver-auto - uses: ./.github/workflows/_release-pr.yml + uses: ./.github/workflows/_release-pr.yaml with: release_branch_name: "release/${{ needs.semver-auto.outputs.RELEASE_BRANCH_NAME_SUFFIX }}" release_tag: ${{ needs.semver-auto.outputs.RELEASE_TAG }} diff --git a/.github/workflows/unit-test-rust.yaml b/.github/workflows/unit-test-rust.yaml new file mode 100644 index 0000000000..b164310717 --- /dev/null +++ b/.github/workflows/unit-test-rust.yaml @@ -0,0 +1,51 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +name: "Run unit tests (Rust)" +on: + push: + branches: + - main + - "release/v*.*" + - "!release/v*.*.*" + paths: + - ".github/workflows/test.yaml" + - "rust/**" + pull_request: + paths: + - ".github/workflows/test.yaml" + - "rust/**" +jobs: + dump-contexts-to-log: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: ./.github/actions/dump-context + detect-ci-container: + uses: ./.github/workflows/_detect-ci-container.yaml + test-rust-qbg: + name: Run tests for Rust + runs-on: ubuntu-latest + needs: [detect-ci-container] + container: + image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + steps: + - uses: actions/checkout@v4 + - name: Set Git config + run: | + git config --global --add safe.directory ${GITHUB_WORKSPACE} + - name: Run tests for Rust / gotestfmt + run: | + TEST_RESULT_DIR=${GITHUB_WORKSPACE} make test/rust/qbg diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml index b678b4afe1..f8552e977a 100644 --- a/.github/workflows/unit-test.yaml +++ b/.github/workflows/unit-test.yaml @@ -23,7 +23,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test.yml" + - ".github/workflows/unit-test.yaml" - "internal/**" - "pkg/**" - "cmd/**" @@ -31,7 +31,7 @@ on: paths: - "go.mod" - "go.sum" - - ".github/workflows/test.yml" + - ".github/workflows/unit-test.yaml" - "internal/**" - "pkg/**" - "cmd/**" @@ -42,7 +42,7 @@ jobs: - uses: actions/checkout@v4 - uses: ./.github/actions/dump-context detect-ci-container: - uses: ./.github/workflows/_detect-ci-container.yml + uses: ./.github/workflows/_detect-ci-container.yaml test-cmd: name: Run tests for cmd packages runs-on: ubuntu-latest diff --git a/.github/workflows/update-actions.yaml b/.github/workflows/update-actions.yaml index 67fe19dd06..e69de29bb2 100644 --- a/.github/workflows/update-actions.yaml +++ b/.github/workflows/update-actions.yaml @@ -1,64 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# -name: Update Actions version -on: - workflow_dispatch: - schedule: - - cron: "0 0 * * *" -jobs: - dump-contexts-to-log: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - - uses: vdaas/vald/.github/actions/dump-context@main - update-version: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4 - with: - token: ${{ secrets.DISPATCH_TOKEN }} - - name: Set Git config - run: | - git config --global --add safe.directory ${GITHUB_WORKSPACE} - - uses: crazy-max/ghaction-import-gpg@v6 - with: - gpg_private_key: ${{ secrets.GPG_PRIVATE_KEY }} - git_user_signingkey: true - git_commit_gpgsign: true - - name: Run update command - run: | - make update/actions - - name: Check difference - id: check_diff - run: | - if git diff --quiet --exit-code; then - echo "Nothing updated" - else - git diff && git status - echo "HAS_GIT_DIFF=true" >> $GITHUB_OUTPUT - fi - - name: Create PR - if: ${{ steps.check_diff.outputs.HAS_GIT_DIFF == 'true' }} - uses: peter-evans/create-pull-request@v6 - with: - author: "${{ secrets.DISPATCH_USER }} " - token: ${{ secrets.DISPATCH_TOKEN }} - committer: "${{ secrets.DISPATCH_USER }} " - signoff: true - delete-branch: true - base: main - title: "Update Actions dependency" - body: "Automated pull request to update Actions." diff --git a/.github/workflows/update-web-docs.yml b/.github/workflows/update-web-docs.yml index f409b63981..e69de29bb2 100644 --- a/.github/workflows/update-web-docs.yml +++ b/.github/workflows/update-web-docs.yml @@ -1,43 +0,0 @@ -# -# Copyright (C) 2019-2025 vdaas.org vald team -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# You may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# https://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -name: "Update web contents" -on: - workflow_dispatch: - push: - branches: - - main - - "release/v*.*" - tags: - - "*.*.*" - - "v*.*.*" - - "*.*.*-*" - - "v*.*.*-*" - paths: - - "**.md" - - "assets/docs/**" -jobs: - dispatch: - runs-on: ubuntu-latest - steps: - - name: Dispatch - run: | - curl --fail -u "${USER}:${TOKEN}" \ - -X POST https://api.github.com/repos/vdaas/web/dispatches \ - -H 'Accept: application/vnd.github.everest-preview+json' \ - --data '{"event_type": "update-contents"}' - env: - USER: ${{ secrets.DISPATCH_USER }} - TOKEN: ${{ secrets.DISPATCH_TOKEN }} diff --git a/.prh.yaml b/.prh.yaml index 781c07823f..c579f607ec 100644 --- a/.prh.yaml +++ b/.prh.yaml @@ -17,7 +17,6 @@ version: 1 # index: ann # index: besteffort # index: burstable -# index: clojure # index: configmap # index: docker # index: documentation @@ -76,10 +75,6 @@ rules: expected: ConfigMap options: wordBoundary: true - - pattern: clojure - expected: Clojure - options: - wordBoundary: true - pattern: docker expected: Docker options: diff --git a/CHANGELOG.md b/CHANGELOG.md index 7158616e8e..793d462448 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,331 @@ # CHANGELOG +## v1.7.16 + +### Docker images + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| component | Docker pull |
+| :--- | :--- |
+| Agent NGT | `docker pull vdaas/vald-agent-ngt:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-ngt:v1.7.16` |
+| Agent sidecar | `docker pull vdaas/vald-agent-sidecar:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-sidecar:v1.7.16` |
+| Discoverers | `docker pull vdaas/vald-discoverer-k8s:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-discoverer-k8s:v1.7.16` |
+| Gateways | `docker pull vdaas/vald-lb-gateway:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-lb-gateway:v1.7.16`<br/>`docker pull vdaas/vald-filter-gateway:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-filter-gateway:v1.7.16` |
+| Index Manager | `docker pull vdaas/vald-manager-index:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-manager-index:v1.7.16` |
+| Helm Operator | `docker pull vdaas/vald-helm-operator:v1.7.16`<br/>`docker pull ghcr.io/vdaas/vald/vald-helm-operator:v1.7.16` |
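Because the table above lists two registries per component, a deployment will usually want to pin one of them by digest. A minimal sketch, assuming only that Docker is installed locally (the lb-gateway image is picked arbitrarily from the table; nothing below is part of the patch itself):

```bash
# Pull the v1.7.16 LB gateway image from GitHub Container Registry
docker pull ghcr.io/vdaas/vald/vald-lb-gateway:v1.7.16

# Resolve the mutable tag to an immutable digest for pinning in manifests
docker image inspect --format '{{index .RepoDigests 0}}' \
  ghcr.io/vdaas/vald/vald-lb-gateway:v1.7.16
```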
+ +### Documents + +- [GoDoc](https://pkg.go.dev/github.com/vdaas/vald@v1.7.16) +- [Helm Chart Reference](https://github.com/vdaas/vald/blob/v1.7.16/charts/vald/README.md) +- [Helm Operator Chart Reference](https://github.com/vdaas/vald/blob/v1.7.16/charts/vald-helm-operator/README.md) + +### Changes + +:recycle: Refactor + +- Fix format of proto files [#2778](https://github.com/vdaas/vald/pull/2778) ([#2783](https://github.com/vdaas/vald/pull/2783)) +- Refactor merge docker and github actions workflow gen logic [#2769](https://github.com/vdaas/vald/pull/2769) ([#2774](https://github.com/vdaas/vald/pull/2774)) + +:pencil2: Document + +- Change symlink API documents [#2741](https://github.com/vdaas/vald/pull/2741) ([#2776](https://github.com/vdaas/vald/pull/2776)) + +:green_heart: CI + +- Refactor github actions [#2773](https://github.com/vdaas/vald/pull/2773) ([#2779](https://github.com/vdaas/vald/pull/2779)) + Change make command [#2765](https://github.com/vdaas/vald/pull/2765) ([#2770](https://github.com/vdaas/vald/pull/2770)) + +:arrow_up: Update dependencies + +- Update libs dependency [#2775](https://github.com/vdaas/vald/pull/2775) ([#2785](https://github.com/vdaas/vald/pull/2785)) + +## v1.7.15 + +### Docker images + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| component | Docker pull |
+| :--- | :--- |
+| Agent NGT | `docker pull vdaas/vald-agent-ngt:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-ngt:v1.7.15` |
+| Agent sidecar | `docker pull vdaas/vald-agent-sidecar:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-sidecar:v1.7.15` |
+| Discoverers | `docker pull vdaas/vald-discoverer-k8s:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-discoverer-k8s:v1.7.15` |
+| Gateways | `docker pull vdaas/vald-lb-gateway:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-lb-gateway:v1.7.15`<br/>`docker pull vdaas/vald-filter-gateway:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-filter-gateway:v1.7.15` |
+| Index Manager | `docker pull vdaas/vald-manager-index:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-manager-index:v1.7.15` |
+| Helm Operator | `docker pull vdaas/vald-helm-operator:v1.7.15`<br/>`docker pull ghcr.io/vdaas/vald/vald-helm-operator:v1.7.15` |
+ +### Documents + +- [GoDoc](https://pkg.go.dev/github.com/vdaas/vald@v1.7.15) +- [Helm Chart Reference](https://github.com/vdaas/vald/blob/v1.7.15/charts/vald/README.md) +- [Helm Operator Chart Reference](https://github.com/vdaas/vald/blob/v1.7.15/charts/vald-helm-operator/README.md) + +### Changes + +:sparkles: New feature + +- Add affinity to jobTemplate [#2758](https://github.com/vdaas/vald/pull/2758) ([#2760](https://github.com/vdaas/vald/pull/2760)) +- feat: Implement delete expired index job [#2702](https://github.com/vdaas/vald/pull/2702) ([#2722](https://github.com/vdaas/vald/pull/2722)) +- Add QUIC support [#1771](https://github.com/vdaas/vald/pull/1771) +- add example-client docker image [#2705](https://github.com/vdaas/vald/pull/2705) ([#2709](https://github.com/vdaas/vald/pull/2709)) + +:recycle: Refactor + +- refactor dockerfiles and update gitattributes [#2743](https://github.com/vdaas/vald/pull/2743) ([#2745](https://github.com/vdaas/vald/pull/2745)) + +:bug: Bugfix + +- :bug: Fix update deps workflow: buf is not found [#2737](https://github.com/vdaas/vald/pull/2737) ([#2739](https://github.com/vdaas/vald/pull/2739)) +- [BUGFIX] resolve agent GetGraphStatistics API double-free error problem [#2733](https://github.com/vdaas/vald/pull/2733) +- fix rust-analyzer [#2731](https://github.com/vdaas/vald/pull/2731) ([#2732](https://github.com/vdaas/vald/pull/2732)) +- Fix installation command for arm64 [#2729](https://github.com/vdaas/vald/pull/2729) ([#2730](https://github.com/vdaas/vald/pull/2730)) +- fix not found error [#2726](https://github.com/vdaas/vald/pull/2726) ([#2727](https://github.com/vdaas/vald/pull/2727)) +- Fix bind DOCKER_OPTS option [#2718](https://github.com/vdaas/vald/pull/2718) ([#2719](https://github.com/vdaas/vald/pull/2719)) + +:pencil2: Document + +- Update README.md [#2724](https://github.com/vdaas/vald/pull/2724) ([#2725](https://github.com/vdaas/vald/pull/2725)) +- :pencil: Remove clj link [#2710](https://github.com/vdaas/vald/pull/2710) ([#2714](https://github.com/vdaas/vald/pull/2714)) + +:green_heart: CI + +- :green_heart: Multi-PF build for example-client [#2713](https://github.com/vdaas/vald/pull/2713) +- Add auto deps version update workflow [#2707](https://github.com/vdaas/vald/pull/2707) ([#2717](https://github.com/vdaas/vald/pull/2717)) + +:arrow_up: Update dependencies + +- :green_heart: use ci-container for update deps cron job [#2744](https://github.com/vdaas/vald/pull/2744) ([#2748](https://github.com/vdaas/vald/pull/2748)) +- update ubuntu version for devcontainer [#2736](https://github.com/vdaas/vald/pull/2736) ([#2750](https://github.com/vdaas/vald/pull/2750)) +- :arrow_up: update versions/BUF_VERSION [#2703](https://github.com/vdaas/vald/pull/2703) ([#2704](https://github.com/vdaas/vald/pull/2704)) + +:handshake: Contributor + +- docs: add highpon as a contributor for code [#2721](https://github.com/vdaas/vald/pull/2721) + +## v1.7.14 + +### Docker images + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+| component | Docker pull |
+| :--- | :--- |
+| Agent NGT | `docker pull vdaas/vald-agent-ngt:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-ngt:v1.7.14` |
+| Agent sidecar | `docker pull vdaas/vald-agent-sidecar:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-agent-sidecar:v1.7.14` |
+| Discoverers | `docker pull vdaas/vald-discoverer-k8s:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-discoverer-k8s:v1.7.14` |
+| Gateways | `docker pull vdaas/vald-lb-gateway:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-lb-gateway:v1.7.14`<br/>`docker pull vdaas/vald-filter-gateway:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-filter-gateway:v1.7.14` |
+| Index Manager | `docker pull vdaas/vald-manager-index:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-manager-index:v1.7.14` |
+| Helm Operator | `docker pull vdaas/vald-helm-operator:v1.7.14`<br/>`docker pull ghcr.io/vdaas/vald/vald-helm-operator:v1.7.14` |
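The Docker Hub and ghcr.io entries in the table are expected to point at the same build; a hedged way to check, assuming `docker buildx` is available locally (the Agent NGT image is used only as an example):

```bash
# Print the manifest digests published for the v1.7.14 Agent NGT image on both registries
docker buildx imagetools inspect vdaas/vald-agent-ngt:v1.7.14
docker buildx imagetools inspect ghcr.io/vdaas/vald/vald-agent-ngt:v1.7.14
```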
+ +### Documents + +- [GoDoc](https://pkg.go.dev/github.com/vdaas/vald@v1.7.14) +- [Helm Chart Reference](https://github.com/vdaas/vald/blob/v1.7.14/charts/vald/README.md) +- [Helm Operator Chart Reference](https://github.com/vdaas/vald/blob/v1.7.14/charts/vald-helm-operator/README.md) + +### Changes + +### :sparkles: New feature + +- Add String sorted topologicalSort [#2696](https://github.com/vdaas/vald/pull/2696) [#2698](https://github.com/vdaas/vald/pull/2698) +- Add CPU_INFO_FLAGS for Apple Silicon [#2694](https://github.com/vdaas/vald/pull/2694) [#2697](https://github.com/vdaas/vald/pull/2697) +- Add New gRPC Options and Add Reconnect Logic for connection Pool [#2685](https://github.com/vdaas/vald/pull/2685) [#2693](https://github.com/vdaas/vald/pull/2693) +- Add option to disable dns resolve [#2634](https://github.com/vdaas/vald/pull/2634) [#2641](https://github.com/vdaas/vald/pull/2641) +- Backport PR #2584 to release/v1.7 for Implement ngt property get API [#2588](https://github.com/vdaas/vald/pull/2588) +- add HTTP2 support for http.Client and Vald HTTP Server [#2572](https://github.com/vdaas/vald/pull/2572) [#2575](https://github.com/vdaas/vald/pull/2575) + +### :zap: Improve performance + +- Refactor grpc/status.withDetails function for performance [#2664](https://github.com/vdaas/vald/pull/2664) [#2668](https://github.com/vdaas/vald/pull/2668) + +### :recycle: Refactor + +- Refactor use Absolute path for Makefile [#2673](https://github.com/vdaas/vald/pull/2673) +- Refactor internal/net/grpc/client.go [#2675](https://github.com/vdaas/vald/pull/2675) +- modify ParseError to FromError for agent handler [#2667](https://github.com/vdaas/vald/pull/2667) [#2679](https://github.com/vdaas/vald/pull/2679) +- Backport PR #2674 to release/v1.7 for Refactor internal/net/grpc/client.go [#2675](https://github.com/vdaas/vald/pull/2675) +- Backport PR #2670 to release/v1.7 for Refactor use Absolute path for Makefile [#2673](https://github.com/vdaas/vald/pull/2673) +- Refactor grpc/status.withDetails function for performance [#2664](https://github.com/vdaas/vald/pull/2664) [#2668](https://github.com/vdaas/vald/pull/2668) +- Refactor for release v1.7.14 [#2639](https://github.com/vdaas/vald/pull/2639) [#2648](https://github.com/vdaas/vald/pull/2648) +- refactor(gateway): delete unused file [#2644](https://github.com/vdaas/vald/pull/2644) [#2646](https://github.com/vdaas/vald/pull/2646) +- Refactor test checkFunc condition [#2599](https://github.com/vdaas/vald/pull/2599) [#2602](https://github.com/vdaas/vald/pull/2602) +- Backport PR #2586 to release/v1.7 for modify rust package structure [#2590](https://github.com/vdaas/vald/pull/2590) +- Backport PR #2577 to release/v1.7 for refactor docker and change buildkit-syft-scanner reference to ghcr.io [#2578](https://github.com/vdaas/vald/pull/2578) + +### :bug: Bugfix + +- Fix gRPC error handling for gateway/filter handler [#2669](https://github.com/vdaas/vald/pull/2669) [#2689](https://github.com/vdaas/vald/pull/2689) +- fix: increase limit [#2683](https://github.com/vdaas/vald/pull/2683) [#2686](https://github.com/vdaas/vald/pull/2686) +- Fix gRPC error handling for mirror-gateway handler [#2665](https://github.com/vdaas/vald/pull/2665) [#2681](https://github.com/vdaas/vald/pull/2681) +- Fix gRPC error msg handling for lb-gateway handler [#2663](https://github.com/vdaas/vald/pull/2663) [#2682](https://github.com/vdaas/vald/pull/2682) +- Bugfix ingress route settings [#2636](https://github.com/vdaas/vald/pull/2636) 
[#2642](https://github.com/vdaas/vald/pull/2642) +- Fix broken links in the document files [#2611](https://github.com/vdaas/vald/pull/2611) [#2614](https://github.com/vdaas/vald/pull/2614) +- Fix: make command name [#2610](https://github.com/vdaas/vald/pull/2610) [#2612](https://github.com/vdaas/vald/pull/2612) +- Bugfix NGT flush logic [#2598](https://github.com/vdaas/vald/pull/2598) [#2606](https://github.com/vdaas/vald/pull/2606) + +### :pencil2: Document + +- Fix broken links in the document files [#2611](https://github.com/vdaas/vald/pull/2611) [#2614](https://github.com/vdaas/vald/pull/2614) + +### :white_check_mark: Testing + +- Refactor test checkFunc condition [#2599](https://github.com/vdaas/vald/pull/2599) [#2602](https://github.com/vdaas/vald/pull/2602) + +### :green_heart: CI + +- Buf CLI migrate to v2 [#2691](https://github.com/vdaas/vald/pull/2691) [#2695](https://github.com/vdaas/vald/pull/2695) +- [create-pull-request] automated change [#2677](https://github.com/vdaas/vald/pull/2677) [#2678](https://github.com/vdaas/vald/pull/2678) +- automatically generate workflows [#2595](https://github.com/vdaas/vald/pull/2595) [#2603](https://github.com/vdaas/vald/pull/2603) + +### :chart_with_upwards_trend: Metrics/Tracing + +- Introduce an observability crate using opentelemetry-rust [#2535](https://github.com/vdaas/vald/pull/2535) [#2609](https://github.com/vdaas/vald/pull/2609) + + + +## Summary by CodeRabbit + +- **New Features** + + - Added several new contributors to the project, enhancing community involvement. + - Introduced a new configuration file for spell checking, improving documentation quality. + - Expanded the project with new configuration files, documentation, and source code for enhanced functionality. + +- **Bug Fixes** + + - Updated version information in issue templates for accuracy. + +- **Documentation** + + - Improved clarity in the pull request template and updated version information. + +- **Chores** + - Modified GitHub Actions for better handling of Docker image tags. 
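The Helm Chart references linked in the Documents sections above correspond to the chart served from the repository URL that the `helm/repo/add` target uses later in this patch (Makefile.d/helm.mk). As a sketch of typical usage, assuming the chart version tracks the release tag and with `my-values.yaml` as a placeholder for your own values file:

```bash
# Add the public Vald chart repository and install a pinned chart version
helm repo add vald https://vald.vdaas.org/charts
helm repo update
helm install vald vald/vald --version v1.7.14 --values my-values.yaml
```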
+ + + ## v1.7.13 ### Docker images @@ -5882,7 +6208,7 @@ - update dependencies version ([#381](https://github.com/vdaas/vald/pull/381)) - Fix missing value on compressor health servers ([#377](https://github.com/vdaas/vald/pull/377)) - Fix compressor readiness shutdown_duration / Fix cassandra … ([#376](https://github.com/vdaas/vald/pull/376)) -- Bump gopkg.in/yaml.v2 from 2.2.8 to 2.3.0 ([#375](https://github.com/vdaas/vald/pull/375)) +- Bump sigs.k8s.io/yaml from 2.2.8 to 2.3.0 ([#375](https://github.com/vdaas/vald/pull/375)) - Fix`internal/log/format` to match the test template ([#369](https://github.com/vdaas/vald/pull/369)) - Fix `internal/log/logger` to match the test template ([#371](https://github.com/vdaas/vald/pull/371)) - Fix failing tests of `internal/log` and modified to match the test template ([#368](https://github.com/vdaas/vald/pull/368)) diff --git a/Makefile b/Makefile index 4249e8c35a..73645e3120 100644 --- a/Makefile +++ b/Makefile @@ -24,8 +24,8 @@ TAG ?= latest CRORG ?= $(ORG) GHCRORG = ghcr.io/$(REPO) AGENT_IMAGE = $(NAME)-agent -AGENT_NGT_IMAGE = $(AGENT_IMAGE)-ngt AGENT_FAISS_IMAGE = $(AGENT_IMAGE)-faiss +AGENT_NGT_IMAGE = $(AGENT_IMAGE)-ngt AGENT_SIDECAR_IMAGE = $(AGENT_IMAGE)-sidecar BENCHMARK_JOB_IMAGE = $(NAME)-benchmark-job BENCHMARK_OPERATOR_IMAGE = $(NAME)-benchmark-operator @@ -36,10 +36,12 @@ BUILDKIT_SYFT_SCANNER_IMAGE = $(BUILDKIT_IMAGE)-syft-scanner CI_CONTAINER_IMAGE = $(NAME)-ci-container DEV_CONTAINER_IMAGE = $(NAME)-dev-container DISCOVERER_IMAGE = $(NAME)-discoverer-k8s +EXAMPLE_CLIENT_IMAGE = $(NAME)-example-client FILTER_GATEWAY_IMAGE = $(NAME)-filter-gateway HELM_OPERATOR_IMAGE = $(NAME)-helm-operator INDEX_CORRECTION_IMAGE = $(NAME)-index-correction INDEX_CREATION_IMAGE = $(NAME)-index-creation +INDEX_DELETION_IMAGE = $(NAME)-index-deletion INDEX_OPERATOR_IMAGE = $(NAME)-index-operator INDEX_SAVE_IMAGE = $(NAME)-index-save LB_GATEWAY_IMAGE = $(NAME)-lb-gateway @@ -49,6 +51,10 @@ MIRROR_GATEWAY_IMAGE = $(NAME)-mirror-gateway READREPLICA_ROTATE_IMAGE = $(NAME)-readreplica-rotate MAINTAINER = "$(ORG).org $(NAME) team <$(NAME)@$(ORG).org>" +DEADLINK_CHECK_PATH ?= "" +DEADLINK_IGNORE_PATH ?= "" +DEADLINK_CHECK_FORMAT = html + DEFAULT_BUILDKIT_SYFT_SCANNER_IMAGE = $(GHCRORG)/$(BUILDKIT_SYFT_SCANNER_IMAGE):nightly VERSION ?= $(eval VERSION := $(shell cat versions/VALD_VERSION))$(VERSION) @@ -85,6 +91,7 @@ BUF_VERSION := $(eval BUF_VERSION := $(shell cat versions/BUF_VERS CMAKE_VERSION := $(eval CMAKE_VERSION := $(shell cat versions/CMAKE_VERSION))$(CMAKE_VERSION) DOCKER_VERSION := $(eval DOCKER_VERSION := $(shell cat versions/DOCKER_VERSION))$(DOCKER_VERSION) FAISS_VERSION := $(eval FAISS_VERSION := $(shell cat versions/FAISS_VERSION))$(FAISS_VERSION) +USEARCH_VERSION := $(eval USEARCH_VERSION := $(shell cat versions/USEARCH_VERSION))$(USEARCH_VERSION) GOLANGCILINT_VERSION := $(eval GOLANGCILINT_VERSION := $(shell cat versions/GOLANGCILINT_VERSION))$(GOLANGCILINT_VERSION) GO_VERSION := $(eval GO_VERSION := $(shell cat versions/GO_VERSION))$(GO_VERSION) HDF5_VERSION := $(eval HDF5_VERSION := $(shell cat versions/HDF5_VERSION))$(HDF5_VERSION) @@ -103,7 +110,6 @@ PROTOBUF_VERSION := $(eval PROTOBUF_VERSION := $(shell cat versions/PRO REVIEWDOG_VERSION := $(eval REVIEWDOG_VERSION := $(shell cat versions/REVIEWDOG_VERSION))$(REVIEWDOG_VERSION) RUST_VERSION := $(eval RUST_VERSION := $(shell cat versions/RUST_VERSION))$(RUST_VERSION) TELEPRESENCE_VERSION := $(eval TELEPRESENCE_VERSION := $(shell cat versions/TELEPRESENCE_VERSION))$(TELEPRESENCE_VERSION) 
-VALDCLI_VERSION := $(eval VALDCLI_VERSION := $(shell cat versions/VALDCLI_VERSION))$(VALDCLI_VERSION) YQ_VERSION := $(eval YQ_VERSION := $(shell cat versions/YQ_VERSION))$(YQ_VERSION) ZLIB_VERSION := $(eval ZLIB_VERSION := $(shell cat versions/ZLIB_VERSION))$(ZLIB_VERSION) @@ -123,7 +129,7 @@ ifeq ($(UNAME),Linux) CPU_INFO_FLAGS := $(eval CPU_INFO_FLAGS := $(shell cat /proc/cpuinfo | grep flags | cut -d " " -f 2- | head -1))$(CPU_INFO_FLAGS) CORES := $(eval CORES := $(shell nproc 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null))$(CORES) else ifeq ($(UNAME),Darwin) -CPU_INFO_FLAGS := "" +CPU_INFO_FLAGS := $(eval CPU_INFO_FLAGS := $(shell sysctl -n machdep.cpu.brand_string 2>/dev/null || echo "Apple Silicon"))$(CPU_INFO_FLAGS) CORES := $(eval CORES := $(shell sysctl -n hw.ncpu 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null))$(CORES) else CPU_INFO_FLAGS := "" @@ -135,18 +141,22 @@ GIT_COMMIT := $(eval GIT_COMMIT := $(shell git rev-list -1 HEAD))$(GIT_COMMIT) MAKELISTS := Makefile $(shell find Makefile.d -type f -regex ".*\.mk") ROOTDIR = $(eval ROOTDIR := $(or $(shell git rev-parse --show-toplevel), $(PWD)))$(ROOTDIR) -PROTODIRS := $(eval PROTODIRS := $(shell find apis/proto -type d | sed -e "s%apis/proto/%%g" | grep -v "apis/proto"))$(PROTODIRS) +PROTODIRS := $(eval PROTODIRS := $(shell find $(ROOTDIR)/apis/proto -type d | sed -e "s%apis/proto/%%g" | grep -v "apis/proto"))$(PROTODIRS) BENCH_DATASET_BASE_DIR = hack/benchmark/assets BENCH_DATASET_MD5_DIR_NAME = checksum BENCH_DATASET_HDF5_DIR_NAME = dataset BENCH_DATASET_MD5_DIR = $(BENCH_DATASET_BASE_DIR)/$(BENCH_DATASET_MD5_DIR_NAME) BENCH_DATASET_HDF5_DIR = $(BENCH_DATASET_BASE_DIR)/$(BENCH_DATASET_HDF5_DIR_NAME) -PROTOS := $(eval PROTOS := $(shell find apis/proto -type f -regex ".*\.proto"))$(PROTOS) +PROTOS := $(eval PROTOS := $(shell find $(ROOTDIR)/apis/proto -type f -regex ".*\.proto"))$(PROTOS) PROTOS_V1 := $(eval PROTOS_V1 := $(filter apis/proto/v1/%.proto,$(PROTOS)))$(PROTOS_V1) PBGOS = $(PROTOS:apis/proto/%.proto=apis/grpc/%.pb.go) SWAGGERS = $(PROTOS:apis/proto/%.proto=apis/swagger/%.swagger.json) -PBDOCS = apis/docs/v1/docs.md +PBDOCS = $(ROOTDIR)/apis/docs/v1/docs.md +PROTO_VALD_APIS := $(eval PROTO_VALD_APIS := $(filter $(ROOTDIR)/apis/proto/v1/vald/%.proto,$(PROTOS)))$(PROTO_VALD_APIS) +PROTO_VALD_API_DOCS := $(PROTO_VALD_APIS:$(ROOTDIR)/apis/proto/v1/vald/%.proto=$(ROOTDIR)/apis/docs/v1/%.md) +PROTO_MIRROR_APIS := $(eval PROTO_MIRROR_APIS := $(filter $(ROOTDIR)/apis/proto/v1/mirror/%.proto,$(PROTOS)))$(PROTO_MIRROR_APIS) +PROTO_MIRROR_API_DOCS := $(PROTO_MIRROR_APIS:$(ROOTDIR)/apis/proto/v1/mirror/%.proto=$(ROOTDIR)/apis/docs/v1/%.md) LDFLAGS = -static -fPIC -pthread -std=gnu++23 -lstdc++ -lm -z relro -z now -flto=auto -march=native -mtune=native -fno-plt -Ofast -fvisibility=hidden -ffp-contract=fast -fomit-frame-pointer -fmerge-all-constants -funroll-loops -falign-functions=32 -ffunction-sections -fdata-sections @@ -390,6 +400,28 @@ help: { lastLine = $$0 }' $(MAKELISTS) | sort -u @printf "\n" +.PHONY: perm +## set correct permissions for dirs and files +perm: + find $(ROOTDIR) -type d -not -path "$(ROOTDIR)/.git*" -exec chmod 755 {} \; + find $(ROOTDIR) -type f -not -path "$(ROOTDIR)/.git*" -not -name ".gitignore" -exec chmod 644 {} \; + if [ -d "$(ROOTDIR)/.git" ]; then \ + chmod 750 "$(ROOTDIR)/.git"; \ + if [ -f "$(ROOTDIR)/.git/config" ]; then \ + chmod 644 "$(ROOTDIR)/.git/config"; \ + fi; \ + if [ -d "$(ROOTDIR)/.git/hooks" ]; then \ + find "$(ROOTDIR)/.git/hooks" -type f -exec chmod 755 {} \;; \ + fi; \ + 
fi + if [ -f "$(ROOTDIR)/.gitignore" ]; then \ + chmod 644 "$(ROOTDIR)/.gitignore"; \ + fi + if [ -f "$(ROOTDIR)/.gitattributes" ]; then \ + chmod 644 "$(ROOTDIR)/.gitattributes"; \ + fi + + .PHONY: all ## execute clean and deps all: clean deps @@ -442,6 +474,16 @@ license: dockerfile: $(call gen-dockerfile,$(ROOTDIR),$(MAINTAINER)) +.PHONY: workflow +## generate workflows +workflow: + $(call gen-dockerfile,$(ROOTDIR),$(MAINTAINER)) + +.PHONY: deadlink-checker +## generate deadlink-checker +deadlink-checker: + $(call gen-deadlink-checker,$(ROOTDIR),$(MAINTAINER),$(DEADLINK_CHECK_PATH),$(DEADLINK_IGNORE_PATH),$(DEADLINK_CHECK_FORMAT)) + .PHONY: init ## initialize development environment init: \ @@ -455,7 +497,6 @@ init: \ tools/install: \ helm/install \ kind/install \ - valdcli/install \ telepresence/install \ textlint/install @@ -469,6 +510,7 @@ update: \ deps \ update/template \ go/deps \ + go/example/deps \ rust/deps \ format @@ -598,6 +640,11 @@ version/ngt: version/faiss: @echo $(FAISS_VERSION) +.PHONY: version/usearch +## print usearch version +version/usearch: + @echo $(USEARCH_VERSION) + .PHONY: version/docker ## print Kubernetes version version/docker: @@ -620,10 +667,6 @@ version/helm: version/yq: @echo $(YQ_VERSION) -.PHONY: version/valdcli -version/valdcli: - @echo $(VALDCLI_VERSION) - .PHONY: version/telepresence version/telepresence: @echo $(TELEPRESENCE_VERSION) @@ -660,7 +703,7 @@ $(LIB_PATH)/libfaiss.a: -DBUILD_STATIC_EXECS=ON \ -DBUILD_TESTING=OFF \ -DFAISS_ENABLE_PYTHON=OFF \ - -DFAISS_ENABLE_GPU=OFF \ + -DFAISS_ENABLE_GPU=OFF \ -DBLA_VENDOR=OpenBLAS \ -DCMAKE_C_FLAGS="$(LDFLAGS)" \ -DCMAKE_EXE_LINKER_FLAGS="$(FAISS_LDFLAGS)" \ @@ -672,6 +715,35 @@ $(LIB_PATH)/libfaiss.a: rm -rf $(TEMP_DIR)/v$(FAISS_VERSION).tar.gz $(TEMP_DIR)/faiss-$(FAISS_VERSION) ldconfig +.PHONY: usearch/install +## install usearch +usearch/install: $(USR_LOCAL)/include/usearch.h +$(USR_LOCAL)/include/usearch.h: + git clone --depth 1 --recursive --branch v$(USEARCH_VERSION) https://github.com/unum-cloud/usearch $(TEMP_DIR)/usearch-$(USEARCH_VERSION) + cd $(TEMP_DIR)/usearch-$(USEARCH_VERSION) && \ + cmake -DCMAKE_BUILD_TYPE=Release \ + -DBUILD_SHARED_LIBS=OFF \ + -DBUILD_TESTING=OFF \ + -DUSEARCH_BUILD_LIB_C=ON \ + -DUSEARCH_USE_FP16LIB=ON \ + -DUSEARCH_USE_OPENMP=ON \ + -DUSEARCH_USE_SIMSIMD=ON \ + -DUSEARCH_USE_JEMALLOC=ON \ + -DCMAKE_C_FLAGS="$(CFLAGS)" \ + -DCMAKE_CXX_FLAGS="$(CXXFLAGS)" \ + -DCMAKE_INSTALL_PREFIX=$(USR_LOCAL) \ + -DCMAKE_INSTALL_LIBDIR=$(LIB_PATH) \ + -B $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build $(TEMP_DIR)/usearch-$(USEARCH_VERSION) + cmake --build $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build -j$(CORES) + cmake --install $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build --prefix=$(USR_LOCAL) + cd $(ROOTDIR) + cp $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build/libusearch_static_c.a $(LIB_PATH)/libusearch_c.a + cp $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build/libusearch_static_c.a $(LIB_PATH)/libusearch_static_c.a + cp $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/build/libusearch_c.so $(LIB_PATH)/libusearch_c.so + cp $(TEMP_DIR)/usearch-$(USEARCH_VERSION)/c/usearch.h $(USR_LOCAL)/include/usearch.h + rm -rf $(TEMP_DIR)/usearch-$(USEARCH_VERSION) + ldconfig + .PHONY: cmake/install ## install CMAKE cmake/install: @@ -689,7 +761,6 @@ cmake/install: cd $(ROOTDIR) rm -rf $(TEMP_DIR)/CMAKE-$(CMAKE_VERSION) ldconfig - # -DCMAKE_USE_OPENSSL=OFF .PHONY: lint ## run lints @@ -737,14 +808,14 @@ files/textlint: \ ## run cspell for document docs/cspell:\ cspell/install - cspell-cli $(ROOTDIR)/docs/**/*.md 
--show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/docs/**/*.md --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: files/cspell ## run cspell for document files/cspell: \ files \ cspell/install - cspell-cli $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) + cspell $(ROOTDIR)/.gitfiles --show-suggestions $(CSPELL_EXTRA_OPTIONS) .PHONY: changelog/update ## update changelog @@ -753,20 +824,19 @@ changelog/update: echo "" >> $(TEMP_DIR)/CHANGELOG.md $(MAKE) -s changelog/next/print >> $(TEMP_DIR)/CHANGELOG.md echo "" >> $(TEMP_DIR)/CHANGELOG.md - tail -n +2 CHANGELOG.md >> $(TEMP_DIR)/CHANGELOG.md - mv -f $(TEMP_DIR)/CHANGELOG.md CHANGELOG.md + tail -n +2 $(ROOTDIR)/CHANGELOG.md >> $(TEMP_DIR)/CHANGELOG.md + mv -f $(TEMP_DIR)/CHANGELOG.md $(ROOTDIR)/CHANGELOG.md .PHONY: changelog/next/print ## print next changelog entry changelog/next/print: - @cat hack/CHANGELOG.template.md | \ + @cat $(ROOTDIR)/hack/CHANGELOG.template.md | \ sed -e 's/{{ version }}/$(VERSION)/g' @echo "$$BODY" include Makefile.d/actions.mk include Makefile.d/bench.mk include Makefile.d/build.mk -include Makefile.d/client.mk include Makefile.d/dependencies.mk include Makefile.d/docker.mk include Makefile.d/e2e.mk @@ -775,7 +845,7 @@ include Makefile.d/helm.mk include Makefile.d/k3d.mk include Makefile.d/k8s.mk include Makefile.d/kind.mk +include Makefile.d/minikube.mk include Makefile.d/proto.mk include Makefile.d/test.mk include Makefile.d/tools.mk -include Makefile.d/minikube.mk diff --git a/Makefile.d/build.mk b/Makefile.d/build.mk index 5ff2473875..e5e175c63c 100644 --- a/Makefile.d/build.mk +++ b/Makefile.d/build.mk @@ -24,6 +24,7 @@ binary/build: \ cmd/gateway/mirror/mirror \ cmd/index/job/correction/index-correction \ cmd/index/job/creation/index-creation \ + cmd/index/job/deletion/index-deletion \ cmd/index/job/readreplica/rotate/readreplica-rotate \ cmd/index/job/save/index-save \ cmd/index/operator/index-operator \ @@ -31,6 +32,7 @@ binary/build: \ cmd/tools/benchmark/job/job \ cmd/tools/benchmark/operator/operator \ cmd/tools/cli/loadtest/loadtest \ + example/client/client \ cmd/agent/core/ngt/ngt \ cmd/agent/core/faiss/faiss \ rust/target/debug/agent \ @@ -79,6 +81,10 @@ cmd/index/job/creation/index-creation: $(eval CGO_ENABLED = 0) $(call go-build,index/job/creation,,-static,,,$@) +cmd/index/job/deletion/index-deletion: + $(eval CGO_ENABLED = 0) + $(call go-build,index/job/deletion,,-static,,,$@) + cmd/index/job/save/index-save: $(eval CGO_ENABLED = 0) $(call go-build,index/job/save,,-static,,,$@) @@ -103,6 +109,10 @@ cmd/tools/cli/loadtest/loadtest: $(eval CGO_ENABLED = 1) $(call go-build,tools/cli/loadtest,-linkmode 'external',$(LDFLAGS) $(HDF5_LDFLAGS), cgo,$(HDF5_VERSION),$@) +example/client/client: + $(eval CGO_ENABLED = 1) + $(call go-example-build,example/client,-linkmode 'external',$(LDFLAGS) $(HDF5_LDFLAGS), cgo,$(HDF5_VERSION),$@) + rust/target/release/agent: pushd rust && cargo build -p agent --release && popd @@ -119,9 +129,11 @@ binary/build/zip: \ artifacts/vald-benchmark-operator-$(GOOS)-$(GOARCH).zip \ artifacts/vald-cli-loadtest-$(GOOS)-$(GOARCH).zip \ artifacts/vald-discoverer-k8s-$(GOOS)-$(GOARCH).zip \ + artifacts/vald-example-client-$(GOOS)-$(GOARCH).zip \ artifacts/vald-filter-gateway-$(GOOS)-$(GOARCH).zip \ artifacts/vald-index-correction-$(GOOS)-$(GOARCH).zip \ artifacts/vald-index-creation-$(GOOS)-$(GOARCH).zip \ + artifacts/vald-index-deletion-$(GOOS)-$(GOARCH).zip \ artifacts/vald-index-operator-$(GOOS)-$(GOARCH).zip \ 
artifacts/vald-index-save-$(GOOS)-$(GOARCH).zip \ artifacts/vald-lb-gateway-$(GOOS)-$(GOARCH).zip \ @@ -181,6 +193,10 @@ artifacts/vald-index-creation-$(GOOS)-$(GOARCH).zip: cmd/index/job/creation/inde $(call mkdir, $(dir $@)) zip --junk-paths $@ $< +artifacts/vald-index-deletion-$(GOOS)-$(GOARCH).zip: cmd/index/job/deletion/index-deletion + $(call mkdir, $(dir $@)) + zip --junk-paths $@ $< + artifacts/vald-index-save-$(GOOS)-$(GOARCH).zip: cmd/index/job/save/index-save $(call mkdir, $(dir $@)) zip --junk-paths $@ $< @@ -192,3 +208,7 @@ artifacts/vald-readreplica-rotate-$(GOOS)-$(GOARCH).zip: cmd/index/job/readrepli artifacts/vald-index-operator-$(GOOS)-$(GOARCH).zip: cmd/index/operator/index-operator $(call mkdir, $(dir $@)) zip --junk-paths $@ $< + +artifacts/vald-example-client-$(GOOS)-$(GOARCH).zip: example/client/client + $(call mkdir, $(dir $@)) + zip --junk-paths $@ $< diff --git a/Makefile.d/dependencies.mk b/Makefile.d/dependencies.mk index 0aae8fe505..a5a95e7c6f 100644 --- a/Makefile.d/dependencies.mk +++ b/Makefile.d/dependencies.mk @@ -17,6 +17,7 @@ .PHONY: update/libs ## update vald libraries including tools update/libs: \ + update/buf \ update/chaos-mesh \ update/cmake \ update/docker \ @@ -38,8 +39,8 @@ update/libs: \ update/reviewdog \ update/rust \ update/telepresence \ + update/usearch \ update/vald \ - update/valdcli \ update/yq \ update/zlib @@ -52,6 +53,7 @@ go/download: ## install Go package dependencies go/deps: \ update/go + head -n -1 $(ROOTDIR)/hack/go.mod.default | awk 'NR>=6 && $$0 !~ /(upgrade|latest|master|main)/' | sort sed -i "3s/go [0-9]\+\.[0-9]\+\(\.[0-9]\+\)\?/go $(GO_VERSION)/g" $(ROOTDIR)/hack/go.mod.default if $(GO_CLEAN_DEPS); then \ rm -rf $(ROOTDIR)/vendor \ @@ -152,6 +154,11 @@ update/helm-docs: update/protobuf: curl -fsSL https://api.github.com/repos/protocolbuffers/protobuf/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' | sed 's/v//g' > $(ROOTDIR)/versions/PROTOBUF_VERSION +.PHONY: update/buf +## update buf version +update/buf: + curl -fsSL https://api.github.com/repos/bufbuild/buf/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/BUF_VERSION + .PHONY: update/kind ## update kind (kubernetes in docker) version update/kind: @@ -192,6 +199,11 @@ update/ngt: update/faiss: curl -fsSL https://api.github.com/repos/facebookresearch/faiss/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' | sed 's/v//g' > $(ROOTDIR)/versions/FAISS_VERSION +.PHONY: update/usearch +## update usearch version +update/usearch: + curl -fsSL https://api.github.com/repos/unum-cloud/usearch/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' | sed 's/v//g' > $(ROOTDIR)/versions/USEARCH_VERSION + .PHONY: update/cmake ## update CMAKE version update/cmake: @@ -227,11 +239,6 @@ update/hdf5: update/vald: curl -fsSL https://api.github.com/repos/$(REPO)/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALD_VERSION -.PHONY: update/valdcli -## update vald client library made by clojure self version -update/valdcli: - curl -fsSL https://api.github.com/repos/$(REPO)-client-clj/releases/latest | grep -Po '"tag_name": "\K.*?(?=")' > $(ROOTDIR)/versions/VALDCLI_VERSION - .PHONY: update/template ## update PULL_REQUEST_TEMPLATE and ISSUE_TEMPLATE update/template: diff --git a/Makefile.d/docker.mk b/Makefile.d/docker.mk index 785a655c03..e4bb5b385d 100644 --- a/Makefile.d/docker.mk +++ b/Makefile.d/docker.mk @@ -29,18 +29,49 @@ docker/build: \ docker/build/ci-container \ docker/build/dev-container \ docker/build/discoverer-k8s \ + 
docker/build/example-client \ docker/build/gateway-filter \ docker/build/gateway-lb \ docker/build/gateway-mirror \ + docker/build/helm-operator \ docker/build/index-correction \ docker/build/index-creation \ + docker/build/index-deletion \ docker/build/index-operator \ docker/build/index-save \ docker/build/loadtest \ docker/build/manager-index \ - docker/build/operator/helm \ docker/build/readreplica-rotate +docker/xpanes/build: + @xpanes -s -c "make -f $(ROOTDIR)/Makefile {}" \ + docker/build/agent \ + docker/build/agent-faiss \ + docker/build/agent-ngt \ + docker/build/agent-sidecar \ + docker/build/benchmark-job \ + docker/build/benchmark-operator \ + docker/build/binfmt \ + docker/build/buildbase \ + docker/build/buildkit \ + docker/build/buildkit-syft-scanner \ + docker/build/ci-container \ + docker/build/dev-container \ + docker/build/discoverer-k8s \ + docker/build/example-client \ + docker/build/gateway-filter \ + docker/build/gateway-lb \ + docker/build/gateway-mirror \ + docker/build/index-correction \ + docker/build/index-creation \ + docker/build/index-deletion \ + docker/build/index-operator \ + docker/build/index-save \ + docker/build/loadtest \ + docker/build/manager-index \ + docker/build/operator/helm \ + docker/build/readreplica-rotate + .PHONY: docker/name/org docker/name/org: @echo "$(ORG)" @@ -74,7 +105,7 @@ ifeq ($(REMOTE),true) -t $(GHCRORG)/$(IMAGE):$(TAG) \ $(EXTRA_ARGS) \ --output type=registry,oci-mediatypes=true,compression=zstd,compression-level=5,force-compression=true,push=true \ - -f $(DOCKERFILE) . + -f $(DOCKERFILE) $(ROOTDIR) else @echo "starting local build for $(IMAGE):$(TAG)" DOCKER_BUILDKIT=1 $(DOCKER) build \ @@ -86,7 +117,7 @@ else $(EXTRA_ARGS) \ -t $(CRORG)/$(IMAGE):$(TAG) \ -t $(GHCRORG)/$(IMAGE):$(TAG) \ - -f $(DOCKERFILE) . 
+ -f $(DOCKERFILE) $(ROOTDIR) endif .PHONY: docker/name/agent-ngt @@ -255,13 +286,13 @@ docker/build/dev-container: IMAGE=$(DEV_CONTAINER_IMAGE) \ docker/build/image -.PHONY: docker/name/operator/helm -docker/name/operator/helm: +.PHONY: docker/name/helm-operator +docker/name/helm-operator: @echo "$(ORG)/$(HELM_OPERATOR_IMAGE)" -.PHONY: docker/build/operator/helm +.PHONY: docker/build/helm-operator ## build helm-operator image -docker/build/operator/helm: +docker/build/helm-operator: @make DOCKERFILE="$(ROOTDIR)/dockers/operator/helm/Dockerfile" \ IMAGE=$(HELM_OPERATOR_IMAGE) \ EXTRA_ARGS="--build-arg OPERATOR_SDK_VERSION=$(OPERATOR_SDK_VERSION) --build-arg UPX_OPTIONS=$(UPX_OPTIONS) $(EXTRA_ARGS)" \ @@ -275,7 +306,7 @@ docker/name/loadtest: ## build loadtest image docker/build/loadtest: @make DOCKERFILE="$(ROOTDIR)/dockers/tools/cli/loadtest/Dockerfile" \ - DOCKER_OPTS="--build-arg ZLIB_VERSION=$(ZLIB_VERSION) --build-arg HDF5_VERSION=$(HDF5_VERSION)" \ + DOCKER_OPTS="$${DOCKER_OPTS:+$${DOCKER_OPTS}} --build-arg ZLIB_VERSION=$(ZLIB_VERSION) --build-arg HDF5_VERSION=$(HDF5_VERSION)" \ IMAGE=$(LOADTEST_IMAGE) \ docker/build/image @@ -312,6 +343,17 @@ docker/build/index-save: IMAGE=$(INDEX_SAVE_IMAGE) \ docker/build/image +.PHONY: docker/name/index-deletion +docker/name/index-deletion: + @echo "$(ORG)/$(INDEX_DELETION_IMAGE)" + +.PHONY: docker/build/index-deletion +## build index-deletion image +docker/build/index-deletion: + @make DOCKERFILE="$(ROOTDIR)/dockers/index/job/deletion/Dockerfile" \ + IMAGE=$(INDEX_DELETION_IMAGE) \ + docker/build/image + .PHONY: docker/name/index-operator docker/name/index-operator: @echo "$(ORG)/$(INDEX_OPERATOR_IMAGE)" @@ -343,7 +385,7 @@ docker/name/benchmark-job: docker/build/benchmark-job: @make DOCKERFILE="$(ROOTDIR)/dockers/tools/benchmark/job/Dockerfile" \ IMAGE=$(BENCHMARK_JOB_IMAGE) \ - DOCKER_OPTS="--build-arg ZLIB_VERSION=$(ZLIB_VERSION) --build-arg HDF5_VERSION=$(HDF5_VERSION)" \ + DOCKER_OPTS="$${DOCKER_OPTS:+$${DOCKER_OPTS}} --build-arg ZLIB_VERSION=$(ZLIB_VERSION) --build-arg HDF5_VERSION=$(HDF5_VERSION)" \ docker/build/image .PHONY: docker/name/benchmark-operator @@ -356,3 +398,15 @@ docker/build/benchmark-operator: @make DOCKERFILE="$(ROOTDIR)/dockers/tools/benchmark/operator/Dockerfile" \ IMAGE=$(BENCHMARK_OPERATOR_IMAGE) \ docker/build/image + +.PHONY: docker/name/example-client +docker/name/example-client: + @echo "$(ORG)/$(EXAMPLE_CLIENT_IMAGE)" + +.PHONY: docker/build/example-client +## build example client docker image +docker/build/example-client: + @make DOCKERFILE="$(ROOTDIR)/dockers/example/client/Dockerfile" \ + IMAGE=$(EXAMPLE_CLIENT_IMAGE) \ + DOCKER_OPTS="$${DOCKER_OPTS:+$${DOCKER_OPTS}} --build-arg ZLIB_VERSION=$(ZLIB_VERSION) --build-arg HDF5_VERSION=$(HDF5_VERSION)" \ + docker/build/image diff --git a/Makefile.d/e2e.mk b/Makefile.d/e2e.mk index 982fab49df..6ebdc3ad35 100644 --- a/Makefile.d/e2e.mk +++ b/Makefile.d/e2e.mk @@ -108,7 +108,7 @@ e2e/actions/run/stream/crud: \ sleep 2 kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-lb.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -116,8 +116,8 @@ e2e/actions/run/stream/crud: \ kubectl get pods 
pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e + $(MAKE) k8s/vald/delete $(MAKE) k3d/delete .PHONY: e2e/actions/run/job @@ -129,7 +129,7 @@ e2e/actions/run/job: \ sleep 2 kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-correction.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -137,8 +137,8 @@ e2e/actions/run/job: \ kubectl get pods pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e/index/job/correction - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/index/job/correction + $(MAKE) k8s/vald/delete $(MAKE) k3d/delete .PHONY: e2e/actions/run/readreplica @@ -151,12 +151,12 @@ e2e/actions/run/readreplica: \ kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) sleep 3 - make k8s/vald/deploy \ + $(MAKE) k8s/vald/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-readreplica.yaml sleep 20 kubectl wait --for=condition=Ready pod -l "app=$(AGENT_NGT_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) kubectl wait --for=condition=ContainersReady pod -l "app=$(AGENT_NGT_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) - make k8s/vald-readreplica/deploy \ + $(MAKE) k8s/vald-readreplica/deploy \ HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-readreplica.yaml sleep 3 kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) @@ -164,6 +164,29 @@ e2e/actions/run/readreplica: \ kubectl get pods pod_name=$$(kubectl get pods --selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ echo $$pod_name; \ - make E2E_TARGET_POD_NAME=$$pod_name e2e/readreplica - make k8s/vald/delete + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/readreplica + $(MAKE) k8s/vald/delete $(MAKE) minikube/delete + +.PHONY: e2e/actions/run/stream/crud/skip +## run GitHub Actions E2E test (Stream CRUD with SkipExistsCheck = true) +e2e/actions/run/stream/crud/skip: \ + hack/benchmark/assets/dataset/$(E2E_DATASET_NAME) \ + k3d/restart + kubectl wait -n kube-system --for=condition=Available deployment/metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + sleep 2 + kubectl wait -n kube-system --for=condition=Ready pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl wait -n kube-system --for=condition=ContainersReady pod -l k8s-app=metrics-server --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + $(MAKE) k8s/vald/deploy \ + HELM_VALUES=$(ROOTDIR)/.github/helm/values/values-lb.yaml + sleep 3 + kubectl wait --for=condition=Ready pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl wait --for=condition=ContainersReady pod -l "app=$(LB_GATEWAY_IMAGE)" --timeout=$(E2E_WAIT_FOR_START_TIMEOUT) + kubectl get pods + pod_name=$$(kubectl get pods 
--selector="app=$(LB_GATEWAY_IMAGE)" | tail -1 | awk '{print $$1}'); \ + echo $$pod_name; \ + $(MAKE) E2E_TARGET_POD_NAME=$$pod_name e2e/skip + $(MAKE) k8s/vald/delete + $(MAKE) k3d/delete + + diff --git a/Makefile.d/functions.mk b/Makefile.d/functions.mk index d8997528dc..fb4feb167f 100644 --- a/Makefile.d/functions.mk +++ b/Makefile.d/functions.mk @@ -42,7 +42,7 @@ define profile-web endef define go-lint - golangci-lint run --config .golangci.yml --fix + golangci-lint run --config .golangci.yaml --fix endef define go-vet @@ -74,7 +74,7 @@ define go-build -X '$(GOPKG)/internal/info.AlgorithmInfo=$5' \ -X '$(GOPKG)/internal/info.BuildCPUInfoFlags=$(CPU_INFO_FLAGS)' \ -X '$(GOPKG)/internal/info.BuildTime=$(DATETIME)' \ - -X '$(GOPKG)/internal/info.CGOEnabled=$(CGO_ENABLED)' \ + -X '$(GOPKG)/internal/info.CGOEnabled=$(if $(filter 1,$(strip $(CGO_ENABLED))),true,false)' \ -X '$(GOPKG)/internal/info.GitCommit=$(GIT_COMMIT)' \ -X '$(GOPKG)/internal/info.GoArch=$(GOARCH)' \ -X '$(GOPKG)/internal/info.GoOS=$(GOOS)' \ @@ -91,6 +91,35 @@ define go-build $6 -version endef +define go-example-build + echo $(GO_SOURCES_INTERNAL) + echo $(PBGOS) + echo $(shell find $(ROOTDIR)/$1 -type f -name '*.go' -not -name '*_test.go' -not -name 'doc.go') + cd $(ROOTDIR)/$1 && \ + CFLAGS="$(CFLAGS)" \ + CXXFLAGS="$(CXXFLAGS)" \ + CGO_ENABLED=$(CGO_ENABLED) \ + CGO_CXXFLAGS="$3" \ + CGO_FFLAGS="$3" \ + CGO_LDFLAGS="$3" \ + GO111MODULE=on \ + GOARCH=$(GOARCH) \ + GOOS=$(GOOS) \ + GOPRIVATE=$(GOPRIVATE) \ + GO_VERSION=$(GO_VERSION) \ + go build \ + --ldflags "-w $2 \ + -extldflags '$3' \ + -buildid=" \ + -modcacherw \ + -mod=readonly \ + -a \ + -tags "osusergo netgo static_build$4" \ + -trimpath \ + -o $(ROOTDIR)/$6 \ + main.go +endef + define telepresence [ -z $(SWAP_IMAGE) ] && IMAGE=$2 || IMAGE=$(SWAP_IMAGE) \ && echo "telepresence replaces $(SWAP_DEPLOYMENT_TYPE)/$1 with $${IMAGE}:$(SWAP_TAG)" \ @@ -401,3 +430,27 @@ define update-github-actions done endef +define gen-deadlink-checker + BIN_PATH="$(TEMP_DIR)/vald-deadlink-checker-gen"; \ + rm -rf $$BIN_PATH; \ + MAINTAINER=$2 \ + GOPRIVATE=$(GOPRIVATE) \ + GOARCH=$(GOARCH) \ + GOOS=$(GOOS) \ + go build -modcacherw \ + -mod=readonly \ + -a \ + -tags "osusergo netgo static_build" \ + -trimpath \ + -o $$BIN_PATH $(ROOTDIR)/hack/tools/deadlink/main.go; \ + $$BIN_PATH -path $3 -ignore-path $4 -format $5 $1; \ + rm -rf $$BIN_PATH +endef + +define gen-api-document + buf generate --template=apis/docs/buf.gen.tmpl.yaml --path $2 + cat apis/docs/v1/payload.md.tmpl apis/docs/v1/_doc.md.tmpl > apis/docs/v1/doc.md.tmpl; \ + buf generate --template=apis/docs/buf.gen.doc.yaml --path $2; \ + mv $(ROOTDIR)/apis/docs/v1/doc.md $1; \ + rm apis/docs/v1/*doc.md.tmpl +endef diff --git a/Makefile.d/git.mk b/Makefile.d/git.mk index f58d62d932..ac3a603a8e 100644 --- a/Makefile.d/git.mk +++ b/Makefile.d/git.mk @@ -16,7 +16,7 @@ .PHONY: git/config/init ## add git configs required for development git/config/init: - git config commit.template ".commit_template" + git config commit.template "$(ROOTDIR)/.commit_template" git config core.fileMode false .PHONY: git/hooks/init diff --git a/Makefile.d/helm.mk b/Makefile.d/helm.mk index f1c9f99fcd..c636bea47d 100644 --- a/Makefile.d/helm.mk +++ b/Makefile.d/helm.mk @@ -20,7 +20,12 @@ helm/install: $(BINDIR)/helm $(BINDIR)/helm: mkdir -p $(BINDIR) - curl -fsSL "https://raw.githubusercontent.com/helm/helm/main/scripts/get-helm-3" | HELM_INSTALL_DIR=$(BINDIR) bash + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) + 
TAR_NAME=helm-$(HELM_VERSION)-$(OS)-$(subst x86_64,amd64,$(shell echo $(DARCH) | tr '[:upper:]' '[:lower:]')) \ + && cd $(TEMP_DIR) \ + && curl -fsSL "https://get.helm.sh/$${TAR_NAME}.tar.gz" -o "$(TEMP_DIR)/$${TAR_NAME}" \ + && tar -xzvf "$(TEMP_DIR)/$${TAR_NAME}" --strip=1 \ + && mv helm $(BINDIR)/helm .PHONY: helm-docs/install ## install helm-docs @@ -28,23 +33,24 @@ helm-docs/install: $(BINDIR)/helm-docs $(BINDIR)/helm-docs: mkdir -p $(BINDIR) - TAR_NAME=helm-docs_$(HELM_DOCS_VERSION)_$(UNAME)_$(ARCH).tar.gz \ + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) + TAR_NAME=helm-docs_$(HELM_DOCS_VERSION)_$(UNAME)_$(DARCH).tar.gz \ && cd $(TEMP_DIR) \ && curl -fsSL "https://github.com/norwoodj/helm-docs/releases/download/v$(HELM_DOCS_VERSION)/$${TAR_NAME}" -o "$(TEMP_DIR)/$${TAR_NAME}"\ - && tar xzvf "$(TEMP_DIR)/$${TAR_NAME}" \ + && tar -xzvf "$(TEMP_DIR)/$${TAR_NAME}" \ && mv helm-docs $(BINDIR)/helm-docs .PHONY: helm/package/vald ## packaging Helm chart for Vald helm/package/vald: - helm package charts/vald + helm package $(ROOTDIR)/charts/vald .PHONY: helm/package/vald-helm-operator ## packaging Helm chart for vald-helm-operator helm/package/vald-helm-operator: \ helm/schema/crd/vald \ helm/schema/crd/vald-helm-operator - helm package charts/vald-helm-operator + helm package $(ROOTDIR)/charts/vald-helm-operator .PHONY: helm/package/vald-benchmark-operator ## packaging Helm chart for vald-helm-operator @@ -52,11 +58,11 @@ helm/package/vald-benchmark-operator: \ helm/schema/crd/vald-benchmark-job \ helm/schema/crd/vald-benchmark-scenario \ helm/schema/crd/vald-benchmark-operator - helm package charts/vald-benchmark-operator + helm package $(ROOTDIR)/charts/vald-benchmark-operator .PHONY: helm/package/vald-readreplica helm/package/vald-readreplica: - helm package charts/vald-readreplica + helm package $(ROOTDIR)/charts/vald-readreplica .PHONY: helm/repo/add ## add Helm chart repository @@ -64,42 +70,42 @@ helm/repo/add: helm repo add vald https://vald.vdaas.org/charts .PHONY: helm/docs/vald -helm/docs/vald: charts/vald/README.md +helm/docs/vald: $(ROOTDIR)/charts/vald/README.md # force to rebuild -.PHONY: charts/vald/README.md -charts/vald/README.md: \ - charts/vald/README.md.gotmpl \ - charts/vald/values.yaml +.PHONY: $(ROOTDIR)/charts/vald/README.md +$(ROOTDIR)/charts/vald/README.md: \ + $(ROOTDIR)/charts/vald/README.md.gotmpl \ + $(ROOTDIR)/charts/vald/values.yaml helm-docs .PHONY: helm/docs/vald-helm-operator -helm/docs/vald-helm-operator: charts/vald-helm-operator/README.md +helm/docs/vald-helm-operator: $(ROOTDIR)/charts/vald-helm-operator/README.md # force to rebuild -.PHONY: charts/vald-helm-operator/README.md -charts/vald-helm-operator/README.md: \ - charts/vald-helm-operator/README.md.gotmpl \ - charts/vald-helm-operator/values.yaml +.PHONY: $(ROOTDIR)/charts/vald-helm-operator/README.md +$(ROOTDIR)/charts/vald-helm-operator/README.md: \ + $(ROOTDIR)/charts/vald-helm-operator/README.md.gotmpl \ + $(ROOTDIR)/charts/vald-helm-operator/values.yaml helm-docs .PHONY: helm/docs/vald-readreplica -helm/docs/vald-readreplica: charts/vald-readreplica/README.md +helm/docs/vald-readreplica: $(ROOTDIR)/charts/vald-readreplica/README.md .PHONY: helm/docs/vald-benchmark-operator -helm/docs/vald-benchmark-operator: charts/vald-benchmark-operator/README.md +helm/docs/vald-benchmark-operator: $(ROOTDIR)/charts/vald-benchmark-operator/README.md -.PHONY: charts/vald-benchmark-operator/README.md -charts/vald-benchmark-operator/README.md: \ - charts/vald-benchmark-operator/README.md.gotmpl \ - 
charts/vald-benchmark-operator/values.yaml +.PHONY: $(ROOTDIR)/charts/vald-benchmark-operator/README.md +$(ROOTDIR)/charts/vald-benchmark-operator/README.md: \ + $(ROOTDIR)/charts/vald-benchmark-operator/README.md.gotmpl \ + $(ROOTDIR)/charts/vald-benchmark-operator/values.yaml helm-docs # force to rebuild -.PHONY: charts/vald-readreplica/README.md -charts/vald-readreplica/README.md: \ - charts/vald-readreplica/README.md.gotmpl \ - charts/vald-readreplica/values.yaml +.PHONY: $(ROOTDIR)/charts/vald-readreplica/README.md +$(ROOTDIR)/charts/vald-readreplica/README.md: \ + $(ROOTDIR)/charts/vald-readreplica/README.md.gotmpl \ + $(ROOTDIR)/charts/vald-readreplica/values.yaml helm-docs .PHONY: helm/schema/all @@ -112,54 +118,44 @@ helm/schema/all: \ .PHONY: helm/schema/vald ## generate json schema for Vald Helm Chart -helm/schema/vald: charts/vald/values.schema.json +helm/schema/vald: $(ROOTDIR)/charts/vald/values.schema.json -charts/vald/values.schema.json: \ - charts/vald/values.yaml +$(ROOTDIR)/charts/vald/values.schema.json: \ + $(ROOTDIR)/charts/vald/values.yaml $(call gen-vald-helm-schema,vald/values) .PHONY: helm/schema/vald-helm-operator ## generate json schema for Vald Helm Operator Chart -helm/schema/vald-helm-operator: charts/vald-helm-operator/values.schema.json +helm/schema/vald-helm-operator: $(ROOTDIR)/charts/vald-helm-operator/values.schema.json -charts/vald-helm-operator/values.schema.json: \ - charts/vald-helm-operator/values.yaml +$(ROOTDIR)/charts/vald-helm-operator/values.schema.json: \ + $(ROOTDIR)/charts/vald-helm-operator/values.yaml $(call gen-vald-helm-schema,vald-helm-operator/values) .PHONY: helm/schema/vald-benchmark-job ## generate json schema for Vald Benchmark Job Chart -helm/schema/vald-benchmark-job: charts/vald-benchmark-operator/job-values.schema.json +helm/schema/vald-benchmark-job: $(ROOTDIR)/charts/vald-benchmark-operator/job-values.schema.json -charts/vald-benchmark-operator/job-values.schema.json: \ - charts/vald-benchmark-operator/schemas/job-values.yaml +$(ROOTDIR)/charts/vald-benchmark-operator/job-values.schema.json: \ + $(ROOTDIR)/charts/vald-benchmark-operator/schemas/job-values.yaml $(call gen-vald-helm-schema,vald-benchmark-operator/schemas/job-values) .PHONY: helm/schema/vald-benchmark-scenario ## generate json schema for Vald Benchmark Job Chart -helm/schema/vald-benchmark-scenario: charts/vald-benchmark-operator/scenario-values.schema.json +helm/schema/vald-benchmark-scenario: $(ROOTDIR)/charts/vald-benchmark-operator/scenario-values.schema.json -charts/vald-benchmark-operator/scenario-values.schema.json: \ - charts/vald-benchmark-operator/schemas/scenario-values.yaml +$(ROOTDIR)/charts/vald-benchmark-operator/scenario-values.schema.json: \ + $(ROOTDIR)/charts/vald-benchmark-operator/schemas/scenario-values.yaml $(call gen-vald-helm-schema,vald-benchmark-operator/schemas/scenario-values) .PHONY: helm/schema/vald-benchmark-operator ## generate json schema for Vald Benchmark Operator Chart -helm/schema/vald-benchmark-operator: charts/vald-benchmark-operator/values.schema.json +helm/schema/vald-benchmark-operator: $(ROOTDIR)/charts/vald-benchmark-operator/values.schema.json -charts/vald-benchmark-operator/values.schema.json: \ - charts/vald-benchmark-operator/values.yaml +$(ROOTDIR)/charts/vald-benchmark-operator/values.schema.json: \ + $(ROOTDIR)/charts/vald-benchmark-operator/values.yaml $(call gen-vald-helm-schema,vald-benchmark-operator/values) -.PHONY: yq/install -## install yq -yq/install: $(BINDIR)/yq - -$(BINDIR)/yq: - mkdir -p $(BINDIR) 
- cd $(TEMP_DIR) \ - && curl -fsSL https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(subst x86_64,amd64,$(shell echo $(ARCH) | tr '[:upper:]' '[:lower:]')) -o $(BINDIR)/yq \ - && chmod a+x $(BINDIR)/yq - .PHONY: helm/schema/crd/all helm/schema/crd/all: \ helm/schema/crd/vald \ diff --git a/Makefile.d/k8s.mk b/Makefile.d/k8s.mk index 6480c5f503..a6b9ff0dfd 100644 --- a/Makefile.d/k8s.mk +++ b/Makefile.d/k8s.mk @@ -41,21 +41,22 @@ k8s/manifest/update: \ --set manager.index.saver.enabled=true \ --set manager.index.creator.enabled=true \ --set manager.index.corrector.enabled=true \ + --set gateway.mirror.enabled=true \ --output-dir $(TEMP_DIR) \ charts/vald - mkdir -p k8s/gateway - mkdir -p k8s/manager - mkdir -p k8s/index/job - mkdir -p k8s/index/job/readreplica - mv $(TEMP_DIR)/vald/templates/agent k8s/agent - mv $(TEMP_DIR)/vald/templates/discoverer k8s/discoverer - mv $(TEMP_DIR)/vald/templates/gateway k8s/gateway - mv $(TEMP_DIR)/vald/templates/manager/index k8s/manager/index - mv $(TEMP_DIR)/vald/templates/index/operator k8s/index/operator - mv $(TEMP_DIR)/vald/templates/index/job/correction k8s/index/job/correction - mv $(TEMP_DIR)/vald/templates/index/job/creation k8s/index/job/creation - mv $(TEMP_DIR)/vald/templates/index/job/save k8s/index/job/save - mv $(TEMP_DIR)/vald/templates/index/job/readreplica/rotate k8s/index/job/readreplica/rotate + mkdir -p $(ROOTDIR)/k8s/gateway + mkdir -p $(ROOTDIR)/k8s/manager + mkdir -p $(ROOTDIR)/k8s/index/job + mkdir -p $(ROOTDIR)/k8s/index/job/readreplica + mv $(TEMP_DIR)/vald/templates/agent $(ROOTDIR)/k8s/agent + mv $(TEMP_DIR)/vald/templates/discoverer $(ROOTDIR)/k8s/discoverer + mv $(TEMP_DIR)/vald/templates/gateway $(ROOTDIR)/k8s/gateway + mv $(TEMP_DIR)/vald/templates/manager/index $(ROOTDIR)/k8s/manager/index + mv $(TEMP_DIR)/vald/templates/index/operator $(ROOTDIR)/k8s/index/operator + mv $(TEMP_DIR)/vald/templates/index/job/correction $(ROOTDIR)/k8s/index/job/correction + mv $(TEMP_DIR)/vald/templates/index/job/creation $(ROOTDIR)/k8s/index/job/creation + mv $(TEMP_DIR)/vald/templates/index/job/save $(ROOTDIR)/k8s/index/job/save + mv $(TEMP_DIR)/vald/templates/index/job/readreplica/rotate $(ROOTDIR)/k8s/index/job/readreplica/rotate rm -rf $(TEMP_DIR) .PHONY: k8s/manifest/helm-operator/clean @@ -71,10 +72,10 @@ k8s/manifest/helm-operator/update: \ helm template \ --output-dir $(TEMP_DIR) \ charts/vald-helm-operator - mkdir -p k8s/operator - mv $(TEMP_DIR)/vald-helm-operator/templates k8s/operator/helm + mkdir -p $(ROOTDIR)/k8s/operator + mv $(TEMP_DIR)/vald-helm-operator/templates $(ROOTDIR)/k8s/operator/helm rm -rf $(TEMP_DIR) - cp -r charts/vald-helm-operator/crds k8s/operator/helm/crds + cp -r $(ROOTDIR)/charts/vald-helm-operator/crds $(ROOTDIR)/k8s/operator/helm/crds .PHONY: k8s/manifest/benchmark-operator/clean ## clean k8s manifests for benchmark-operator @@ -89,10 +90,10 @@ k8s/manifest/benchmark-operator/update: \ helm template \ --output-dir $(TEMP_DIR) \ charts/vald-benchmark-operator - mkdir -p k8s/tools/benchmark - mv $(TEMP_DIR)/vald-benchmark-operator/templates k8s/tools/benchmark/operator + mkdir -p $(ROOTDIR)/k8s/tools/benchmark + mv $(TEMP_DIR)/vald-benchmark-operator/templates $(ROOTDIR)/k8s/tools/benchmark/operator rm -rf $(TEMP_DIR) - cp -r charts/vald-benchmark-operator/crds k8s/tools/benchmark/operator/crds + cp -r $(ROOTDIR)/charts/vald-benchmark-operator/crds $(ROOTDIR)/k8s/tools/benchmark/operator/crds .PHONY: k8s/manifest/readreplica/clean ## clean k8s manifests for readreplica @@ -107,7 
+108,7 @@ k8s/manifest/readreplica/update: \ helm template \ --output-dir $(TEMP_DIR) \ charts/vald-readreplica - mv $(TEMP_DIR)/vald-readreplica/templates k8s/readreplica + mv $(TEMP_DIR)/vald-readreplica/templates $(ROOTDIR)/k8s/readreplica rm -rf $(TEMP_DIR) .PHONY: k8s/vald/deploy @@ -189,15 +190,15 @@ k8s/multi/vald/deploy: -@kubectl create ns $(MIRROR01_NAMESPACE) -@kubectl create ns $(MIRROR02_NAMESPACE) -@kubectl create ns $(MIRROR03_NAMESPACE) - helm install vald-cluster-01 charts/vald \ + helm install vald-cluster-01 $(ROOTDIR)/charts/vald \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-with-mirror.yaml \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-01.yaml \ -n $(MIRROR01_NAMESPACE) - helm install vald-cluster-02 charts/vald \ + helm install vald-cluster-02 $(ROOTDIR)/charts/vald \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-with-mirror.yaml \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-02.yaml \ -n $(MIRROR02_NAMESPACE) - helm install vald-cluster-03 charts/vald \ + helm install vald-cluster-03 $(ROOTDIR)/charts/vald \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-with-mirror.yaml \ -f $(ROOTDIR)/charts/vald/values/multi-vald/dev-vald-03.yaml \ -n $(MIRROR03_NAMESPACE) @@ -345,18 +346,18 @@ k8s/external/cert-manager/delete: .PHONY: k8s/external/minio/deploy ## deploy minio k8s/external/minio/deploy: - kubectl apply -f k8s/external/minio/deployment.yaml - kubectl apply -f k8s/external/minio/svc.yaml + kubectl apply -f $(ROOTDIR)/k8s/external/minio/deployment.yaml + kubectl apply -f $(ROOTDIR)/k8s/external/minio/svc.yaml sleep $(K8S_SLEEP_DURATION_FOR_WAIT_COMMAND) kubectl wait --for=condition=ready pod -l app=minio --timeout=600s - kubectl apply -f k8s/external/minio/mb-job.yaml + kubectl apply -f $(ROOTDIR)/k8s/external/minio/mb-job.yaml sleep $(K8S_SLEEP_DURATION_FOR_WAIT_COMMAND) kubectl wait --for=condition=complete job/minio-make-bucket --timeout=600s .PHONY: k8s/external/minio/delete ## delete minio k8s/external/minio/delete: - kubectl delete -f k8s/external/minio + kubectl delete -f $(ROOTDIR)/k8s/external/minio .PHONY: k8s/metrics/metrics-server/deploy ## deploy metrics-serrver @@ -373,12 +374,12 @@ k8s/metrics/metrics-server/delete: .PHONY: k8s/metrics/prometheus/deploy ## deploy prometheus k8s/metrics/prometheus/deploy: - kubectl apply -f k8s/metrics/prometheus + kubectl apply -f $(ROOTDIR)/k8s/metrics/prometheus .PHONY: k8s/metrics/prometheus/delete ## delete prometheus k8s/metrics/prometheus/delete: - kubectl delete -f k8s/metrics/prometheus + kubectl delete -f $(ROOTDIR)/k8s/metrics/prometheus .PHONY: k8s/metrics/prometheus/operator/deploy ## deploy prometheus operator @@ -394,14 +395,14 @@ k8s/metrics/prometheus/operator/delete: .PHONY: k8s/metrics/grafana/deploy ## deploy grafana k8s/metrics/grafana/deploy: - kubectl apply -f k8s/metrics/grafana/dashboards - kubectl apply -f k8s/metrics/grafana + kubectl apply -f $(ROOTDIR)/k8s/metrics/grafana/dashboards + kubectl apply -f $(ROOTDIR)/k8s/metrics/grafana .PHONY: k8s/metrics/grafana/delete ## delete grafana k8s/metrics/grafana/delete: - kubectl delete -f k8s/metrics/grafana/dashboards - kubectl delete -f k8s/metrics/grafana + kubectl delete -f $(ROOTDIR)/k8s/metrics/grafana/dashboards + kubectl delete -f $(ROOTDIR)/k8s/metrics/grafana .PHONY: k8s/metrics/jaeger/deploy ## deploy jaeger @@ -411,63 +412,63 @@ k8s/metrics/jaeger/deploy: kubectl wait --for=condition=ready pod -l app.kubernetes.io/name=jaeger-operator --timeout=60s kubectl wait --for=condition=available 
deployment/jaeger-jaeger-operator --timeout=60s sleep $(JAEGER_OPERATOR_WAIT_DURATION) - kubectl apply -f k8s/metrics/jaeger/jaeger.yaml + kubectl apply -f $(ROOTDIR)/k8s/metrics/jaeger/jaeger.yaml .PHONY: k8s/metrics/jaeger/delete ## delete jaeger k8s/metrics/jaeger/delete: - kubectl delete -f k8s/metrics/jaeger + kubectl delete -f $(ROOTDIR)/k8s/metrics/jaeger helm uninstall jaeger .PHONY: k8s/metrics/loki/deploy ## deploy loki and promtail k8s/metrics/loki/deploy: - kubectl apply -f k8s/metrics/loki + kubectl apply -f $(ROOTDIR)/k8s/metrics/loki .PHONY: k8s/metrics/loki/delete ## delete loki and promtail k8s/metrics/loki/delete: - kubectl delete -f k8s/metrics/loki + kubectl delete -f $(ROOTDIR)/k8s/metrics/loki .PHONY: k8s/metrics/tempo/deploy ## deploy tempo and jaeger-agent k8s/metrics/tempo/deploy: - kubectl apply -f k8s/metrics/tempo + kubectl apply -f $(ROOTDIR)/k8s/metrics/tempo .PHONY: k8s/metrics/tempo/delete ## delete tempo and jaeger-agent k8s/metrics/tempo/delete: - kubectl delete -f k8s/metrics/tempo + kubectl delete -f $(ROOTDIR)/k8s/metrics/tempo .PHONY: k8s/metrics/profefe/deploy ## deploy profefe k8s/metrics/profefe/deploy: - kubectl apply -f k8s/metrics/profefe + kubectl apply -f $(ROOTDIR)/k8s/metrics/profefe .PHONY: k8s/metrics/profefe/delete ## delete profefe k8s/metrics/profefe/delete: - kubectl delete -f k8s/metrics/profefe + kubectl delete -f $(ROOTDIR)/k8s/metrics/profefe .PHONY: k8s/metrics/pyroscope/deploy ## deploy pyroscope k8s/metrics/pyroscope/deploy: - kubectl apply -k k8s/metrics/pyroscope/base + kubectl apply -k $(ROOTDIR)/k8s/metrics/pyroscope/base .PHONY: k8s/metrics/pyroscope/delete ## delete pyroscope k8s/metrics/pyroscope/delete: - kubectl delete -k k8s/metrics/pyroscope/base + kubectl delete -k $(ROOTDIR)/k8s/metrics/pyroscope/base .PHONY: k8s/metrics/pyroscope/pv/deploy ## deploy pyroscope on persistent volume k8s/metrics/pyroscope/pv/deploy: - kubectl apply -k k8s/metrics/pyroscope/overlay + kubectl apply -k $(ROOTDIR)/k8s/metrics/pyroscope/overlay .PHONY: k8s/metrics/pyroscope/pv/delete ## delete pyroscope on persistent volume k8s/metrics/pyroscope/pv/delete: - kubectl delete -k k8s/metrics/pyroscope/overlay + kubectl delete -k $(ROOTDIR)/k8s/metrics/pyroscope/overlay .PHONY: k8s/linkerd/deploy ## deploy linkerd to k8s @@ -533,8 +534,9 @@ telepresence/install: $(BINDIR)/telepresence $(BINDIR)/telepresence: mkdir -p $(BINDIR) + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) cd $(TEMP_DIR) \ - && curl -fsSL "https://app.getambassador.io/download/tel2oss/releases/download/v$(TELEPRESENCE_VERSION)/telepresence-$(OS)-$(subst x86_64,amd64,$(shell echo $(ARCH) | tr '[:upper:]' '[:lower:]'))" -o $(BINDIR)/telepresence \ + && curl -fsSL "https://app.getambassador.io/download/tel2oss/releases/download/v$(TELEPRESENCE_VERSION)/telepresence-$(OS)-$(subst x86_64,amd64,$(shell echo $(DARCH) | tr '[:upper:]' '[:lower:]'))" -o $(BINDIR)/telepresence \ && chmod a+x $(BINDIR)/telepresence .PHONY: telepresence/swap/agent-ngt diff --git a/Makefile.d/kind.mk b/Makefile.d/kind.mk index 4d87429f47..c83bf5783f 100644 --- a/Makefile.d/kind.mk +++ b/Makefile.d/kind.mk @@ -19,7 +19,8 @@ kind/install: $(BINDIR)/kind $(BINDIR)/kind: mkdir -p $(BINDIR) - curl -fsSL https://github.com/kubernetes-sigs/kind/releases/download/v$(KIND_VERSION)/kind-$(OS)-$(subst x86_64,amd64,$(shell echo $(ARCH) | tr '[:upper:]' '[:lower:]')) -o $(BINDIR)/kind + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) + curl -fsSL 
https://github.com/kubernetes-sigs/kind/releases/download/v$(KIND_VERSION)/kind-$(OS)-$(subst x86_64,amd64,$(shell echo $(DARCH) | tr '[:upper:]' '[:lower:]')) -o $(BINDIR)/kind chmod a+x $(BINDIR)/kind .PHONY: kind/start diff --git a/Makefile.d/proto.mk b/Makefile.d/proto.mk index f26164ca5a..9b1ec1d309 100644 --- a/Makefile.d/proto.mk +++ b/Makefile.d/proto.mk @@ -17,14 +17,15 @@ ## build protobufs proto/all: \ proto/deps \ - proto/gen + proto/gen/code \ + proto/gen/api/docs .PHONY: proto/clean ## clean proto artifacts proto/clean: - find apis/grpc -name "*.pb.go" | xargs -P$(CORES) rm -f - find apis/grpc -name "*.pb.json.go" | xargs -P$(CORES) rm -f - rm -rf apis/swagger apis/docs + find $(ROOTDIR)/apis/grpc -name "*.pb.go" | xargs -P$(CORES) rm -f + find $(ROOTDIR)/apis/grpc -name "*.pb.json.go" | xargs -P$(CORES) rm -f + rm -rf $(ROOTDIR)/apis/swagger $(ROOTDIR)/apis/docs .PHONY: proto/paths/print ## print proto paths @@ -34,23 +35,28 @@ proto/paths/print: .PHONY: proto/deps ## install protobuf dependencies proto/deps: \ - $(GOBIN)/buf + $(GOBIN)/buf \ + $(GOBIN)/protoc-gen-doc .PHONY: proto/clean/deps ## uninstall all protobuf dependencies proto/clean/deps: rm -rf $(GOBIN)/buf + rm -rf $(GOBIN)/protoc-gen-doc $(GOBIN)/buf: $(call go-install, github.com/bufbuild/buf/cmd/buf) +$(GOBIN)/protoc-gen-doc: + $(call go-install, github.com/pseudomuto/protoc-gen-doc/cmd/protoc-gen-doc) + $(ROOTDIR)/apis/proto/v1/rpc/errdetails/error_details.proto: curl -fsSL https://raw.githubusercontent.com/googleapis/googleapis/master/google/rpc/error_details.proto -o $(ROOTDIR)/apis/proto/v1/rpc/errdetails/error_details.proto sed -i -e "s/package google.rpc/package rpc.v1/" $(ROOTDIR)/apis/proto/v1/rpc/errdetails/error_details.proto sed -i -e "s%google.golang.org/genproto/googleapis/rpc/errdetails;errdetails%$(GOPKG)/apis/grpc/v1/rpc/errdetails%" $(ROOTDIR)/apis/proto/v1/rpc/errdetails/error_details.proto sed -i -e "s/com.google.rpc/org.vdaas.vald.api.v1.rpc/" $(ROOTDIR)/apis/proto/v1/rpc/errdetails/error_details.proto -proto/gen: \ +proto/gen/code: \ $(PROTOS) \ proto/deps @$(call green, "generating pb.go and swagger.json files and documents for API v1...") @@ -58,6 +64,25 @@ proto/gen: \ buf generate make proto/replace +proto/gen/api/docs: \ + proto/gen/api/docs/payload \ + $(PROTO_VALD_API_DOCS) \ + $(PROTO_MIRROR_API_DOCS) + +proto/gen/api/docs/payload: $(ROOTDIR)/apis/docs/v1/payload.md.tmpl + +$(ROOTDIR)/apis/docs/v1/payload.md.tmpl: $(ROOTDIR)/apis/proto/v1/payload/payload.proto $(ROOTDIR)/apis/docs/v1/payload.tmpl + @$(call green,"generating payload v1...") + buf generate --template=apis/docs/buf.gen.payload.yaml + +$(ROOTDIR)/apis/docs/v1/%.md: $(ROOTDIR)/apis/proto/v1/vald/%.proto $(ROOTDIR)/apis/docs/v1/payload.md.tmpl $(ROOTDIR)/apis/docs/v1/doc.tmpl + @$(call green,"generating documents for API v1...") + @$(call gen-api-document,$@,$(subst $(ROOTDIR)/,,$<)) + +$(ROOTDIR)/apis/docs/v1/mirror.md: $(ROOTDIR)/apis/proto/v1/mirror/mirror.proto $(ROOTDIR)/apis/docs/v1/payload.md.tmpl $(ROOTDIR)/apis/docs/v1/doc.tmpl + @$(call green,"generating documents for API v1...") + @$(call gen-api-document,$@,$(subst $(ROOTDIR)/,,$<)) + proto/replace: find $(ROOTDIR)/apis/grpc/* -name '*.go' | xargs -P$(CORES) sed -i -E "s%google.golang.org/grpc/codes%$(GOPKG)/internal/net/grpc/codes%g" find $(ROOTDIR)/apis/grpc/* -name '*.go' | xargs -P$(CORES) sed -i -E "s%google.golang.org/grpc/status%$(GOPKG)/internal/net/grpc/status%g" diff --git a/Makefile.d/test.mk b/Makefile.d/test.mk index 767fcb2ab3..7b0e1361bf 100644 
--- a/Makefile.d/test.mk +++ b/Makefile.d/test.mk @@ -284,6 +284,16 @@ test/cmd: CGO_LDFLAGS="$(CGO_LDFLAGS)" \ go test -short -shuffle=on -race -mod=readonly -cover $(ROOTDIR)/cmd/... +.PHONY: test/rust/qbg +## run tests for qbg +test/rust/qbg: + cargo test --manifest-path rust/Cargo.toml --package qbg --lib -- tests::test_ffi_qbg --exact --show-output + cargo test --manifest-path rust/Cargo.toml --package qbg --lib -- tests::test_ffi_qbg_prebuilt --exact --show-output + rm -rf rust/libs/algorithms/qbg/index/ + cargo test --manifest-path rust/Cargo.toml --package qbg --lib -- tests::test_property --exact --show-output + cargo test --manifest-path rust/Cargo.toml --package qbg --lib -- tests::test_index --exact --show-output + rm -rf rust/libs/algorithms/qbg/index/ + .PHONY: test/hack ## run tests for hack test/hack: diff --git a/Makefile.d/tools.mk b/Makefile.d/tools.mk index 8d7ee0dd66..75b4cf79d8 100644 --- a/Makefile.d/tools.mk +++ b/Makefile.d/tools.mk @@ -71,7 +71,8 @@ $(BINDIR)/reviewdog: kubectl/install: $(BINDIR)/kubectl $(BINDIR)/kubectl: - curl -fsSL "https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(OS)/$(subst x86_64,amd64,$(shell echo $(ARCH) | tr '[:upper:]' '[:lower:]'))/kubectl" -o $(BINDIR)/kubectl + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) + curl -fsSL "https://dl.k8s.io/release/$(KUBECTL_VERSION)/bin/$(OS)/$(subst x86_64,amd64,$(shell echo $(DARCH) | tr '[:upper:]' '[:lower:]'))/kubectl" -o $(BINDIR)/kubectl chmod a+x $(BINDIR)/kubectl .PHONY: textlint/install @@ -89,7 +90,33 @@ textlint/ci/install: cspell/install: $(NPM_GLOBAL_PREFIX)/bin/cspell $(NPM_GLOBAL_PREFIX)/bin/cspell: - npm install -g cspell@latest + npm install -g cspell@latest \ + @cspell/dict-cpp \ + @cspell/dict-docker \ + @cspell/dict-en_us \ + @cspell/dict-fullstack \ + @cspell/dict-git \ + @cspell/dict-golang \ + @cspell/dict-k8s \ + @cspell/dict-makefile \ + @cspell/dict-markdown \ + @cspell/dict-npm \ + @cspell/dict-public-licenses \ + @cspell/dict-rust \ + @cspell/dict-shell + cspell link add @cspell/dict-cpp + cspell link add @cspell/dict-docker + cspell link add @cspell/dict-en_us + cspell link add @cspell/dict-fullstack + cspell link add @cspell/dict-git + cspell link add @cspell/dict-golang + cspell link add @cspell/dict-k8s + cspell link add @cspell/dict-makefile + cspell link add @cspell/dict-markdown + cspell link add @cspell/dict-npm + cspell link add @cspell/dict-public-licenses + cspell link add @cspell/dict-rust + cspell link add @cspell/dict-shell .PHONY: buf/install buf/install: $(BINDIR)/buf @@ -189,13 +216,15 @@ $(LIB_PATH)/libz.a: $(LIB_PATH) -DBUILD_SHARED_LIBS=OFF \ -DBUILD_STATIC_EXECS=ON \ -DBUILD_TESTING=OFF \ - -DCMAKE_C_FLAGS="-fPIC" \ - -DCMAKE_INSTALL_PREFIX=$(USR_LOCAL) \ -DZLIB_BUILD_SHARED=OFF \ -DZLIB_BUILD_STATIC=ON \ - -DZLIB_USE_STATIC_LIBS=ON \ -DZLIB_COMPAT=ON \ - .. 
\ + -DZLIB_USE_STATIC_LIBS=ON \ + -DCMAKE_CXX_FLAGS="$(CXXFLAGS)" \ + -DCMAKE_C_FLAGS="$(CFLAGS)" \ + -DCMAKE_INSTALL_LIBDIR=$(LIB_PATH) \ + -DCMAKE_INSTALL_PREFIX=$(USR_LOCAL) \ + -B $(TEMP_DIR)/zlib/build $(TEMP_DIR)/zlib \ && make -j$(CORES) \ && make install \ && cd $(ROOTDIR) \ @@ -207,24 +236,39 @@ hdf5/install: $(LIB_PATH)/libhdf5.a $(LIB_PATH)/libhdf5.a: $(LIB_PATH) \ zlib/install mkdir -p $(TEMP_DIR)/hdf5 \ - && curl -fsSL https://github.com/HDFGroup/hdf5/releases/download/$(HDF5_VERSION)/hdf5.tar.gz -o $(TEMP_DIR)/hdf5.tar.gz \ - && tar -xzvf $(TEMP_DIR)/hdf5.tar.gz -C $(TEMP_DIR)/hdf5 --strip-components 2 \ + && curl -fsSL https://github.com/HDFGroup/hdf5/archive/refs/tags/$(HDF5_VERSION).tar.gz -o $(TEMP_DIR)/hdf5.tar.gz \ + && tar -xzvf $(TEMP_DIR)/hdf5.tar.gz -C $(TEMP_DIR)/hdf5 --strip-components 1 \ && mkdir -p $(TEMP_DIR)/hdf5/build \ && cd $(TEMP_DIR)/hdf5/build \ && cmake -DCMAKE_BUILD_TYPE=Release \ -DBUILD_SHARED_LIBS=OFF \ -DBUILD_STATIC_EXECS=ON \ -DBUILD_TESTING=OFF \ - -DCMAKE_INSTALL_PREFIX=$(USR_LOCAL) \ - -DH5_ZLIB_INCLUDE_DIR=$(USR_LOCAL)/include \ - -DH5_ZLIB_LIBRARY=$(LIB_PATH)/libz.a \ -DHDF5_BUILD_CPP_LIB=OFF \ -DHDF5_BUILD_HL_LIB=ON \ -DHDF5_BUILD_STATIC_EXECS=ON \ -DHDF5_BUILD_TOOLS=OFF \ -DHDF5_ENABLE_Z_LIB_SUPPORT=ON \ - .. \ + -DH5_ZLIB_INCLUDE_DIR=$(USR_LOCAL)/include \ + -DH5_ZLIB_LIBRARY=$(LIB_PATH)/libz.a \ + -DCMAKE_CXX_FLAGS="$(CXXFLAGS)" \ + -DCMAKE_C_FLAGS="$(CFLAGS)" \ + -DCMAKE_INSTALL_LIBDIR=$(LIB_PATH) \ + -DCMAKE_INSTALL_PREFIX=$(USR_LOCAL) \ + -B $(TEMP_DIR)/hdf5/build $(TEMP_DIR)/hdf5 \ && make -j$(CORES) \ && make install \ && cd $(ROOTDIR) \ - && rm -rf $(TEMP_DIR)/hdf5.tar.gz $(TEMP_DIR)/HDF5_VERSION + && rm -rf $(TEMP_DIR)/hdf5.tar.gz $(TEMP_DIR)/hdf5 + +.PHONY: yq/install +## install yq +yq/install: $(BINDIR)/yq + +$(BINDIR)/yq: + mkdir -p $(BINDIR) + $(eval DARCH := $(subst aarch64,arm64,$(ARCH))) + cd $(TEMP_DIR) \ + && curl -fsSL https://github.com/mikefarah/yq/releases/download/$(YQ_VERSION)/yq_$(OS)_$(subst x86_64,amd64,$(shell echo $(DARCH) | tr '[:upper:]' '[:lower:]')) -o $(BINDIR)/yq \ + && chmod a+x $(BINDIR)/yq + diff --git a/README.md b/README.md index a03458bf5b..9d692f15ca 100755 --- a/README.md +++ b/README.md @@ -10,7 +10,6 @@ [![Go Reference](https://pkg.go.dev/badge/github.com/vdaas/vald.svg)](https://pkg.go.dev/github.com/vdaas/vald) [![Codacy Badge](https://img.shields.io/codacy/grade/a6e544eee7bc49e08a000bb10ba3deed?style=flat-square)](https://www.codacy.com/app/i.can.feel.gravity/vald?utm_source=github.com&utm_medium=referral&utm_content=vdaas/vald&utm_campaign=Badge_Grade) [![Go Report Card](https://goreportcard.com/badge/github.com/vdaas/vald?style=flat-square)](https://goreportcard.com/report/github.com/vdaas/vald) -[![DepShield Badge](https://depshield.sonatype.org/badges/vdaas/vald/depshield.svg?style=flat-square)](https://depshield.github.io) [![FOSSA Status](https://app.fossa.com/api/projects/custom%2B21465%2Fvald.svg?type=small)](https://app.fossa.com/projects/custom%2B21465%2Fvald?ref=badge_small) [![DeepSource](https://static.deepsource.io/deepsource-badge-light-mini.svg)](https://deepsource.io/gh/vdaas/vald/?ref=repository-badge) [![DeepSource](https://deepsource.io/gh/vdaas/vald.svg/?label=resolved+issues&show_trend=true&token=UpNEsc0zsAfGw-MPPa6O05Lb)](https://deepsource.io/gh/vdaas/vald/?ref=repository-badge) @@ -265,7 +264,7 @@ make init -[![All Contributors](https://img.shields.io/badge/all_contributors-18-orange.svg?style=flat-square)](#contributors) +[![All 
Contributors](https://img.shields.io/badge/all_contributors-21-orange.svg?style=flat-square)](#contributors) @@ -299,6 +298,9 @@ Thanks goes to these wonderful people ([emoji key](https://allcontributors.org/d Yusuke Kadowaki
💻 ⚠️
aknishid 💻 🚧 📖
Hrichik Mazumder 📖
+ Shunya Morihira (森平 隼矢) 🔧 💻
+ miyamoto 💻 🔬
+ s-shiraki
⚠️ 💻 diff --git a/apis/docs/buf.gen.doc.yaml b/apis/docs/buf.gen.doc.yaml new file mode 100644 index 0000000000..ca05f0852d --- /dev/null +++ b/apis/docs/buf.gen.doc.yaml @@ -0,0 +1,27 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +version: v2 +managed: + disable: + - file_option: go_package + module: buf.build/googleapis/googleapis + override: + - file_option: go_package_prefix + value: github.com/vdaas/vald/apis/grpc +plugins: + - local: protoc-gen-doc + out: apis/docs/v1 + opt: apis/docs/v1/doc.md.tmpl,doc.md diff --git a/apis/docs/buf.gen.payload.yaml b/apis/docs/buf.gen.payload.yaml new file mode 100644 index 0000000000..183a5c0234 --- /dev/null +++ b/apis/docs/buf.gen.payload.yaml @@ -0,0 +1,29 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +version: v2 +managed: + disable: + - file_option: go_package + module: buf.build/googleapis/googleapis + override: + - file_option: go_package_prefix + value: github.com/vdaas/vald/apis/grpc +plugins: + - local: protoc-gen-doc + out: apis/docs/v1 + opt: apis/docs/v1/payload.tmpl,payload.md.tmpl +inputs: + - proto_file: apis/proto/v1/payload/payload.proto diff --git a/apis/docs/buf.gen.tmpl.yaml b/apis/docs/buf.gen.tmpl.yaml new file mode 100644 index 0000000000..9748220460 --- /dev/null +++ b/apis/docs/buf.gen.tmpl.yaml @@ -0,0 +1,27 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +version: v2 +managed: + disable: + - file_option: go_package + module: buf.build/googleapis/googleapis + override: + - file_option: go_package_prefix + value: github.com/vdaas/vald/apis/grpc +plugins: + - local: protoc-gen-doc + out: apis/docs/v1 + opt: apis/docs/v1/doc.tmpl,_doc.md.tmpl diff --git a/apis/docs/v1/doc.tmpl b/apis/docs/v1/doc.tmpl new file mode 100644 index 0000000000..243c951243 --- /dev/null +++ b/apis/docs/v1/doc.tmpl @@ -0,0 +1,66 @@ +{{- range .Files -}} +{{- range .Services -}} +{{- $descs := dict -}} +{{- range $desc := (splitList "---\n" .Description) -}} +{{- $lines := splitList "\n" $desc -}} +{{- $_ := set $descs (first $lines) (join "\n" (rest $lines)) -}} +{{- end -}} +# Vald {{ .Name }} APIs + +## Overview + +{{ if hasKey $descs "Overview" }}{{ $descs.Overview | trim }}{{ end }} + +```rpc +service {{.Name}} { + +{{ range .Methods }} rpc {{ .Name }}({{ .RequestFullType }}) returns ({{ .ResponseFullType }}) {} +{{ end }} +} +``` + +{{ range .Methods }} + +{{- $descs := dict -}} +{{- range $desc := (splitList "---\n" .Description) -}} +{{- $lines := splitList "\n" $desc -}} +{{- $_ := set $descs (first $lines) (join "\n" (rest $lines)) -}} +{{- end -}} +## {{ .Name }} RPC + +{{ if hasKey $descs "Overview" }}{{ $descs.Overview | trim }}{{ end }} + +### Input + +- the scheme of `{{ .RequestFullType }}` + + ```rpc +{{ printf "{{- template \"scheme:%s\" }}\n" .RequestFullType }} + ``` +{{ printf "{{ template \"field:%s\" }}" .RequestFullType }} +### Output + +- the scheme of `{{ .ResponseFullType }}` + + ```rpc +{{ printf "{{- template \"scheme:%s\" }}\n" .ResponseFullType }} + ``` +{{ printf "{{ template \"field:%s\" }}" .ResponseFullType }} +{{ if hasKey $descs "Status Code" }} +### Status Code + +| code | description | +| :--: | :---------------- | +{{ pluck "Status Code" $descs | first | trim }} + +Please refer to [Response Status Code](../status.md) for more details. +{{ end }} +{{ if hasKey $descs "Troubleshooting" }} +### Troubleshooting + +{{ pluck "Troubleshooting" $descs | first | trim }} +{{ end }} + +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/apis/docs/v1/docs.md b/apis/docs/v1/docs.md index 56ba9668e1..e0a8cde5e7 100644 --- a/apis/docs/v1/docs.md +++ b/apis/docs/v1/docs.md @@ -100,6 +100,7 @@ - [Update.MultiRequest](#payload-v1-Update-MultiRequest) - [Update.ObjectRequest](#payload-v1-Update-ObjectRequest) - [Update.Request](#payload-v1-Update-Request) + - [Update.TimestampRequest](#payload-v1-Update-TimestampRequest) - [Upsert](#payload-v1-Upsert) - [Upsert.Config](#payload-v1-Upsert-Config) - [Upsert.MultiObjectRequest](#payload-v1-Upsert-MultiObjectRequest) @@ -1154,6 +1155,18 @@ Represent the update request. | vector | [Object.Vector](#payload-v1-Object-Vector) | | The vector to be updated. | | config | [Update.Config](#payload-v1-Update-Config) | | The configuration of the update request. | + + +### Update.TimestampRequest + +Represent a vector meta data. + +| Field | Type | Label | Description | +| --------- | ----------------- | ----- | ------------------------------------------------- | +| id | [string](#string) | | The vector ID. | +| timestamp | [int64](#int64) | | timestamp represents when this vector inserted. | +| force | [bool](#bool) | | force represents forcefully update the timestamp. | + ### Upsert @@ -1352,7 +1365,7 @@ Represent the ingress filter service. 
## v1/mirror/mirror.proto -Copyright (C) 2019-2025 vdaas.org vald team <vald@vdaas.org> +Copyright (C) 2019-2024 vdaas.org vald team <vald@vdaas.org> Licensed under the Apache License, Version 2.0 (the "License"); You may not use this file except in compliance with the License. @@ -1782,11 +1795,12 @@ Search service provides ways to search indexed vectors. Update service provides ways to update indexed vectors. -| Method Name | Request Type | Response Type | Description | -| ------------ | ------------------------------------------------------------------ | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | -| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | -| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | -| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| Method Name | Request Type | Response Type | Description | +| --------------- | -------------------------------------------------------------------------- | ----------------------------------------------------------------------------- | ----------------------------------------------------------------------- | +| Update | [.payload.v1.Update.Request](#payload-v1-Update-Request) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update an indexed vector. | +| StreamUpdate | [.payload.v1.Update.Request](#payload-v1-Update-Request) stream | [.payload.v1.Object.StreamLocation](#payload-v1-Object-StreamLocation) stream | A method to update multiple indexed vectors by bidirectional streaming. | +| MultiUpdate | [.payload.v1.Update.MultiRequest](#payload-v1-Update-MultiRequest) | [.payload.v1.Object.Locations](#payload-v1-Object-Locations) | A method to update multiple indexed vectors in a single request. | +| UpdateTimestamp | [.payload.v1.Update.TimestampRequest](#payload-v1-Update-TimestampRequest) | [.payload.v1.Object.Location](#payload-v1-Object-Location) | A method to update timestamp an indexed vector. | diff --git a/apis/docs/v1/filter.md b/apis/docs/v1/filter.md new file mode 100644 index 0000000000..fa4b1643f6 --- /dev/null +++ b/apis/docs/v1/filter.md @@ -0,0 +1,1552 @@ +# Vald Filter APIs + +## Overview + +Filter Server is responsible for providing insert, update, upsert and search interface for `Vald Filter Gateway`. + +Vald Filter Gateway forward user request to user-defined ingress/egress filter components allowing user to run custom logic. 
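As a rough illustration of the flow described above, the following Go sketch searches by a raw binary object through the Filter gateway and lets the configured vectorizer component turn it into a vector. It assumes the published Go client packages `github.com/vdaas/vald-client-go/v1/vald` and `github.com/vdaas/vald-client-go/v1/payload`, a placeholder gateway address, and a placeholder vectorizer target; the nested type names simply follow protoc-gen-go naming conventions and are not taken from this patch.

```go
package main

import (
	"context"
	"log"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The gateway address and the vectorizer target below are placeholders.
	conn, err := grpc.Dial("vald-filter-gateway.default.svc.cluster.local:8081",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vald.NewFilterClient(conn)

	// Search by a raw binary object; the vectorizer target converts it to a vector.
	res, err := client.SearchObject(context.Background(), &payload.Search_ObjectRequest{
		Object: []byte("raw object bytes, e.g. an image or a sentence"),
		Config: &payload.Search_Config{
			RequestId: "example-search-1",
			Num:       10,
			Radius:    -1,
			Epsilon:   0.1,
		},
		Vectorizer: &payload.Filter_Target{
			Host: "vald-ingress-filter.default.svc.cluster.local",
			Port: 8081,
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, d := range res.GetResults() {
		log.Printf("id=%s distance=%f", d.GetId(), d.GetDistance())
	}
}
```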
+ +```rpc +service Filter { + + rpc SearchObject(payload.v1.Search.ObjectRequest) returns (payload.v1.Search.Response) {} + rpc MultiSearchObject(payload.v1.Search.MultiObjectRequest) returns (payload.v1.Search.Responses) {} + rpc StreamSearchObject(payload.v1.Search.ObjectRequest) returns (payload.v1.Search.StreamResponse) {} + rpc InsertObject(payload.v1.Insert.ObjectRequest) returns (payload.v1.Object.Location) {} + rpc StreamInsertObject(payload.v1.Insert.ObjectRequest) returns (payload.v1.Object.StreamLocation) {} + rpc MultiInsertObject(payload.v1.Insert.MultiObjectRequest) returns (payload.v1.Object.Locations) {} + rpc UpdateObject(payload.v1.Update.ObjectRequest) returns (payload.v1.Object.Location) {} + rpc StreamUpdateObject(payload.v1.Update.ObjectRequest) returns (payload.v1.Object.StreamLocation) {} + rpc MultiUpdateObject(payload.v1.Update.MultiObjectRequest) returns (payload.v1.Object.Locations) {} + rpc UpsertObject(payload.v1.Upsert.ObjectRequest) returns (payload.v1.Object.Location) {} + rpc StreamUpsertObject(payload.v1.Upsert.ObjectRequest) returns (payload.v1.Object.StreamLocation) {} + rpc MultiUpsertObject(payload.v1.Upsert.MultiObjectRequest) returns (payload.v1.Object.Locations) {} + +} +``` + +## SearchObject RPC + +SearchObject RPC is the method to search object(s) similar to request object. + +### Input + +- the scheme of `payload.v1.Search.ObjectRequest` + + ```rpc + message Search.ObjectRequest { + bytes object = 1; + Search.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + ``` + + - Search.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | bytes | | The binary object to be searched. | + | config | Search.Config | | The configuration of the search request. | + | vectorizer | Filter.Target | | Filter configuration. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. 
| + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Search.Response` + + ```rpc + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## MultiSearchObject RPC + +StreamSearchObject RPC is the method to search vectors with multi queries(objects) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+By using the bidirectional streaming RPC, the search request can be communicated in any order between client and server. +Each Search request and response are independent. + +### Input + +- the scheme of `payload.v1.Search.MultiObjectRequest` + + ```rpc + message Search.MultiObjectRequest { + repeated Search.ObjectRequest requests = 1; + } + + message Search.ObjectRequest { + bytes object = 1; + Search.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + ``` + + - Search.MultiObjectRequest + + | field | type | label | description | + | :------: | :------------------- | :------- | :-------------------------------------------------------------- | + | requests | Search.ObjectRequest | repeated | Represent the multiple search by binary object request content. | + + - Search.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | bytes | | The binary object to be searched. | + | config | Search.Config | | The configuration of the search request. | + | vectorizer | Filter.Target | | Filter configuration. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. 
| + +### Output + +- the scheme of `payload.v1.Search.Responses` + + ```rpc + message Search.Responses { + repeated Search.Response responses = 1; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Responses + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------------------------------------------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## StreamSearchObject RPC + +MultiSearchObject RPC is the method to search objects with multiple objects in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
+ +### Input + +- the scheme of `payload.v1.Search.ObjectRequest` + + ```rpc + message Search.ObjectRequest { + bytes object = 1; + Search.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + ``` + + - Search.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | bytes | | The binary object to be searched. | + | config | Search.Config | | The configuration of the search request. | + | vectorizer | Filter.Target | | Filter configuration. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Search.StreamResponse` + + ```rpc + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.StreamResponse + + | field | type | label | description | + | :------: | :---------------- | :---- | :----------------------------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. 
| + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## InsertObject RPC + +InsertObject RPC is the method to insert object through Vald Filter Gateway. + +### Input + +- the scheme of `payload.v1.Insert.ObjectRequest` + + ```rpc + message Insert.ObjectRequest { + Object.Blob object = 1; + Insert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Insert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + | vectorizer | Filter.Target | | Filter configurations. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## StreamInsertObject RPC + +StreamInsertObject RPC is the method to add new multiple object using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc). + +By using the bidirectional streaming RPC, the insert request can be communicated in any order between client and server. +Each Insert request and response are independent. +It's the recommended method to insert a large number of objects. 
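To make the bidirectional-streaming flow above concrete, here is a hypothetical Go sketch that sends many objects and receives their locations concurrently. It reuses the assumed `vald.FilterClient` from the earlier connection sketch; all identifiers are illustrative and follow protoc-gen-go conventions rather than being taken from this patch.

```go
// Hypothetical sketch: streaming many binary objects through the Filter gateway.
package example

import (
	"context"
	"io"
	"log"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func streamInsertObjects(ctx context.Context, client vald.FilterClient, objects map[string][]byte) error {
	stream, err := client.StreamInsertObject(ctx)
	if err != nil {
		return err
	}

	// Receive locations (or per-request errors) concurrently with sending.
	done := make(chan error, 1)
	go func() {
		for {
			res, err := stream.Recv()
			if err == io.EOF {
				done <- nil
				return
			}
			if err != nil {
				done <- err
				return
			}
			if st := res.GetStatus(); st != nil {
				log.Printf("insert failed: %s", st.GetMessage())
				continue
			}
			log.Printf("inserted uuid=%s ips=%v", res.GetLocation().GetUuid(), res.GetLocation().GetIps())
		}
	}()

	for id, obj := range objects {
		if err := stream.Send(&payload.Insert_ObjectRequest{
			Object: &payload.Object_Blob{Id: id, Object: obj},
			Config: &payload.Insert_Config{SkipStrictExistCheck: false},
		}); err != nil {
			return err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return err
	}
	return <-done
}
```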
+ +### Input + +- the scheme of `payload.v1.Insert.ObjectRequest` + + ```rpc + message Insert.ObjectRequest { + Object.Blob object = 1; + Insert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Insert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + | vectorizer | Filter.Target | | Filter configurations. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## MultiInsertObject RPC + +MultiInsertObject RPC is the method to add multiple new objects in **1** request. 
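A hedged Go sketch of the batched variant follows, again assuming the `vald.FilterClient` and payload types from the earlier sketches. Because the whole batch travels as a single gRPC message, the batch size should stay below the configured message size limit.

```go
// Hypothetical sketch: batching several object inserts into one MultiInsertObject call.
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func multiInsertObjects(ctx context.Context, client vald.FilterClient, objects map[string][]byte) (*payload.Object_Locations, error) {
	reqs := make([]*payload.Insert_ObjectRequest, 0, len(objects))
	for id, obj := range objects {
		reqs = append(reqs, &payload.Insert_ObjectRequest{
			Object: &payload.Object_Blob{Id: id, Object: obj},
			Config: &payload.Insert_Config{SkipStrictExistCheck: false},
		})
	}
	// One request carries the whole batch; keep it within the gRPC message size limit.
	return client.MultiInsertObject(ctx, &payload.Insert_MultiObjectRequest{Requests: reqs})
}
```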
+ +### Input + +- the scheme of `payload.v1.Insert.MultiObjectRequest` + + ```rpc + message Insert.MultiObjectRequest { + repeated Insert.ObjectRequest requests = 1; + } + + message Insert.ObjectRequest { + Object.Blob object = 1; + Insert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Insert.MultiObjectRequest + + | field | type | label | description | + | :------: | :------------------- | :------- | :------------------------------------------- | + | requests | Insert.ObjectRequest | repeated | Represent multiple insert by object content. | + + - Insert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + | vectorizer | Filter.Target | | Filter configurations. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## UpdateObject RPC + +UpdateObject RPC is the method to update a single vector. 
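As a minimal, assumed-API sketch of a single update through the Filter gateway (same hypothetical client and packages as above):

```go
// Hypothetical sketch: updating one indexed object via the Filter gateway.
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func updateObject(ctx context.Context, client vald.FilterClient, id string, obj []byte) (*payload.Object_Location, error) {
	return client.UpdateObject(ctx, &payload.Update_ObjectRequest{
		Object: &payload.Object_Blob{Id: id, Object: obj},
		Config: &payload.Update_Config{
			// Skip the strict existence check when the caller already knows the ID is indexed.
			SkipStrictExistCheck: true,
			// Keep the default balanced update (remove, then insert) behaviour.
			DisableBalancedUpdate: false,
		},
	})
}
```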
+ +### Input + +- the scheme of `payload.v1.Update.ObjectRequest` + + ```rpc + message Update.ObjectRequest { + Object.Blob object = 1; + Update.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Update.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be updated. | + | config | Update.Config | | The configuration of the update request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## StreamUpdateObject RPC + +StreamUpdateObject RPC is the method to update multiple objects using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+By using the bidirectional streaming RPC, the update request can be communicated in any order between client and server. +Each Update request and response are independent. +It's the recommended method to update the large amount of objects. + +### Input + +- the scheme of `payload.v1.Update.ObjectRequest` + + ```rpc + message Update.ObjectRequest { + Object.Blob object = 1; + Update.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Update.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be updated. | + | config | Update.Config | | The configuration of the update request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## MultiUpdateObject RPC + +MultiUpdateObject is the method to update multiple objects in **1** request. + +
+gRPC has a message size limitation.
+Please take care that the size of the request does not exceed the limit.
+
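+To make the batching shape concrete, below is a minimal Go sketch that builds an `Update.MultiObjectRequest` from several binary objects and sends it in one call; the message fields are detailed under Input below. The package paths, the `vald.NewFilterClient` constructor, the vectorizer endpoint, and the existing `*grpc.ClientConn` are assumptions based on the standard generated stubs rather than part of this specification.
+
+```go
+package example
+
+import (
+  "context"
+  "log"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+  "google.golang.org/grpc"
+)
+
+// multiUpdateObjects sends one MultiUpdateObject request carrying every object.
+// Keep the total payload below the configured gRPC message size limit.
+func multiUpdateObjects(ctx context.Context, conn *grpc.ClientConn, objects map[string][]byte) error {
+  client := vald.NewFilterClient(conn) // assumed constructor for the object-based RPCs
+
+  reqs := make([]*payload.Update_ObjectRequest, 0, len(objects))
+  for id, blob := range objects {
+    reqs = append(reqs, &payload.Update_ObjectRequest{
+      Object: &payload.Object_Blob{Id: id, Object: blob},
+      Config: &payload.Update_Config{SkipStrictExistCheck: false},
+      // Vectorizer is the ingress filter that converts the blob into a vector (placeholder endpoint).
+      Vectorizer: &payload.Filter_Target{Host: "vectorizer.default.svc.cluster.local", Port: 8081},
+    })
+  }
+
+  locs, err := client.MultiUpdateObject(ctx, &payload.Update_MultiObjectRequest{Requests: reqs})
+  if err != nil {
+    return err
+  }
+  for _, loc := range locs.GetLocations() {
+    log.Printf("updated uuid=%s on %v", loc.GetUuid(), loc.GetIps())
+  }
+  return nil
+}
+```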
+ +### Input + +- the scheme of `payload.v1.Update.MultiObjectRequest` + + ```rpc + message Update.MultiObjectRequest { + repeated Update.ObjectRequest requests = 1; + } + + message Update.ObjectRequest { + Object.Blob object = 1; + Update.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Update.MultiObjectRequest + + | field | type | label | description | + | :------: | :------------------- | :------- | :---------------------------------------------------- | + | requests | Update.ObjectRequest | repeated | Represent the multiple update object request content. | + + - Update.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be updated. | + | config | Update.Config | | The configuration of the update request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## UpsertObject RPC + +UpsertObject RPC is the method to update a single object and add a new single object. 
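+
+As a quick orientation before the schema below, this is a minimal Go sketch of a single UpsertObject call, assuming the standard generated stubs; the client type, package paths, and vectorizer endpoint are illustrative assumptions.
+
+```go
+package example
+
+import (
+  "context"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// upsertObject inserts the object when the ID is new and updates it otherwise.
+func upsertObject(ctx context.Context, client vald.FilterClient, id string, blob []byte) (*payload.Object_Location, error) {
+  return client.UpsertObject(ctx, &payload.Upsert_ObjectRequest{
+    Object: &payload.Object_Blob{Id: id, Object: blob},
+    // See Upsert.Config under Input for the meaning of each flag.
+    Config: &payload.Upsert_Config{SkipStrictExistCheck: true},
+    // The ingress filter that turns the blob into a vector; hostname and port are placeholders.
+    Vectorizer: &payload.Filter_Target{Host: "vectorizer.default.svc.cluster.local", Port: 8081},
+  })
+}
+```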
+ +### Input + +- the scheme of `payload.v1.Upsert.ObjectRequest` + + ```rpc + message Upsert.ObjectRequest { + Object.Blob object = 1; + Upsert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Upsert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## StreamUpsertObject RPC + +UpsertObject RPC is the method to update a single object and add a new single object. 
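+
+StreamUpsertObject exchanges the same `Upsert.ObjectRequest` messages over a bidirectional stream and returns one `Object.StreamLocation` per request. The minimal Go sketch below sends requests from one goroutine while reading results concurrently; the client wiring and package paths are assumptions based on the standard generated stubs.
+
+```go
+package example
+
+import (
+  "context"
+  "io"
+  "log"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// streamUpsertObjects upserts many objects over one bidirectional stream.
+func streamUpsertObjects(ctx context.Context, client vald.FilterClient, objects map[string][]byte) error {
+  stream, err := client.StreamUpsertObject(ctx)
+  if err != nil {
+    return err
+  }
+
+  // Sender: each Upsert.ObjectRequest is independent of the others.
+  go func() {
+    for id, blob := range objects {
+      if err := stream.Send(&payload.Upsert_ObjectRequest{
+        Object: &payload.Object_Blob{Id: id, Object: blob},
+        Config: &payload.Upsert_Config{},
+      }); err != nil {
+        log.Printf("send failed: %v", err)
+        break
+      }
+    }
+    _ = stream.CloseSend()
+  }()
+
+  // Receiver: each response carries either a location or a per-request error status.
+  for {
+    res, err := stream.Recv()
+    if err == io.EOF {
+      return nil
+    }
+    if err != nil {
+      return err
+    }
+    if st := res.GetStatus(); st != nil && st.GetCode() != 0 {
+      log.Printf("upsert failed: %s", st.GetMessage())
+      continue
+    }
+    log.Printf("upserted uuid=%s", res.GetLocation().GetUuid())
+  }
+}
+```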
+ +### Input + +- the scheme of `payload.v1.Upsert.ObjectRequest` + + ```rpc + message Upsert.ObjectRequest { + Object.Blob object = 1; + Upsert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Upsert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +## MultiUpsertObject RPC + +MultiUpsertObject is the method to update existing multiple objects and add new multiple objects in **1** request. + +
+gRPC has a message size limitation.
+Please take care that the size of the request does not exceed the limit.
+
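+The request construction mirrors MultiUpdateObject above; the minimal Go fragment below only shows the call shape, with the client wired as in the earlier sketches. The message fields are detailed under Input below.
+
+```go
+package example
+
+import (
+  "context"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// multiUpsertObjects upserts a batch of binary objects in a single request.
+// Keep the aggregate size under the gRPC message size limit, or split reqs into several calls.
+func multiUpsertObjects(ctx context.Context, client vald.FilterClient, reqs []*payload.Upsert_ObjectRequest) (*payload.Object_Locations, error) {
+  return client.MultiUpsertObject(ctx, &payload.Upsert_MultiObjectRequest{Requests: reqs})
+}
+```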
+ +### Input + +- the scheme of `payload.v1.Upsert.MultiObjectRequest` + + ```rpc + message Upsert.MultiObjectRequest { + repeated Upsert.ObjectRequest requests = 1; + } + + message Upsert.ObjectRequest { + Object.Blob object = 1; + Upsert.Config config = 2; + Filter.Target vectorizer = 3; + } + + message Object.Blob { + string id = 1; + bytes object = 2; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + ``` + + - Upsert.MultiObjectRequest + + | field | type | label | description | + | :------: | :------------------- | :------- | :---------------------------------------------------- | + | requests | Upsert.ObjectRequest | repeated | Represent the multiple upsert object request content. | + + - Upsert.ObjectRequest + + | field | type | label | description | + | :--------: | :------------ | :---- | :--------------------------------------- | + | object | Object.Blob | | The binary object to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + | vectorizer | Filter.Target | | Filter target. | + + - Object.Blob + + | field | type | label | description | + | :----: | :----- | :---- | :----------------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. 
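+
+The sketches above assume an established `*grpc.ClientConn`; a minimal way to create one against a Vald gateway is shown below. The target address, the insecure channel, and `grpc.NewClient` (grpc-go v1.63 or later; older releases use `grpc.Dial`) are assumptions to adapt to your cluster.
+
+```go
+package example
+
+import (
+  "google.golang.org/grpc"
+  "google.golang.org/grpc/credentials/insecure"
+)
+
+// dial opens a plaintext gRPC connection to the Vald gateway; adjust the address and credentials for your cluster.
+func dial(target string) (*grpc.ClientConn, error) {
+  return grpc.NewClient(target, grpc.WithTransportCredentials(insecure.NewCredentials()))
+}
+```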
diff --git a/apis/docs/v1/flush.md b/apis/docs/v1/flush.md new file mode 100644 index 0000000000..504c14ef51 --- /dev/null +++ b/apis/docs/v1/flush.md @@ -0,0 +1,68 @@ +# Vald Flush APIs + +## Overview + +Flush Service is responsible for removing all vectors that are indexed and uncommitted in the `vald-agent`. + +```rpc +service Flush { + + rpc Flush(payload.v1.Flush.Request) returns (payload.v1.Info.Index.Count) {} + +} +``` + +## Flush RPC + +Flush RPC is the method to remove all vectors. + +### Input + +- the scheme of `payload.v1.Flush.Request` + + ```rpc + message Flush.Request { + // empty + } + + ``` + + - Flush.Request + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.Count` + + ```rpc + message Info.Index.Count { + uint32 stored = 1; + uint32 uncommitted = 2; + bool indexing = 3; + bool saving = 4; + } + + ``` + + - Info.Index.Count + + | field | type | label | description | + | :---------: | :----- | :---- | :--------------------------- | + | stored | uint32 | | The stored index count. | + | uncommitted | uint32 | | The uncommitted index count. | + | indexing | bool | | The indexing index count. | + | saving | bool | | The saving index count. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. diff --git a/apis/docs/v1/index.md b/apis/docs/v1/index.md new file mode 100644 index 0000000000..5379f4efb5 --- /dev/null +++ b/apis/docs/v1/index.md @@ -0,0 +1,473 @@ +# Vald Index APIs + +## Overview + +Represent the index manager service. + +```rpc +service Index { + + rpc IndexInfo(payload.v1.Empty) returns (payload.v1.Info.Index.Count) {} + rpc IndexDetail(payload.v1.Empty) returns (payload.v1.Info.Index.Detail) {} + rpc IndexStatistics(payload.v1.Empty) returns (payload.v1.Info.Index.Statistics) {} + rpc IndexStatisticsDetail(payload.v1.Empty) returns (payload.v1.Info.Index.StatisticsDetail) {} + rpc IndexProperty(payload.v1.Empty) returns (payload.v1.Info.Index.PropertyDetail) {} + +} +``` + +## IndexInfo RPC + +Represent the RPC to get the index information. + +### Input + +- the scheme of `payload.v1.Empty` + + ```rpc + message Empty { + // empty + } + + ``` + + - Empty + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.Count` + + ```rpc + message Info.Index.Count { + uint32 stored = 1; + uint32 uncommitted = 2; + bool indexing = 3; + bool saving = 4; + } + + ``` + + - Info.Index.Count + + | field | type | label | description | + | :---------: | :----- | :---- | :--------------------------- | + | stored | uint32 | | The stored index count. | + | uncommitted | uint32 | | The uncommitted index count. | + | indexing | bool | | The indexing index count. | + | saving | bool | | The saving index count. | + +## IndexDetail RPC + +Represent the RPC to get the index information for each agents. 
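+
+As an illustration of how these read-only RPCs are consumed, the following is a minimal Go sketch that calls IndexDetail and prints the per-agent counts; the `vald.NewIndexClient` constructor and package paths are assumptions based on the generated stubs for the service definition above.
+
+```go
+package example
+
+import (
+  "context"
+  "log"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+  "google.golang.org/grpc"
+)
+
+// printIndexDetail fetches the index counts for every agent in the cluster.
+func printIndexDetail(ctx context.Context, conn *grpc.ClientConn) error {
+  client := vald.NewIndexClient(conn) // assumed constructor for the Index service
+  detail, err := client.IndexDetail(ctx, &payload.Empty{})
+  if err != nil {
+    return err
+  }
+  log.Printf("replica=%d live_agents=%d", detail.GetReplica(), detail.GetLiveAgents())
+  for agent, count := range detail.GetCounts() {
+    log.Printf("%s: stored=%d uncommitted=%d indexing=%t saving=%t",
+      agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving())
+  }
+  return nil
+}
+```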
+ +### Input + +- the scheme of `payload.v1.Empty` + + ```rpc + message Empty { + // empty + } + + ``` + + - Empty + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.Detail` + + ```rpc + message Info.Index.Detail { + repeated Info.Index.Detail.CountsEntry counts = 1; + uint32 replica = 2; + uint32 live_agents = 3; + } + + message Info.Index.Detail.CountsEntry { + string key = 1; + Info.Index.Count value = 2; + } + + message Info.Index.Count { + uint32 stored = 1; + uint32 uncommitted = 2; + bool indexing = 3; + bool saving = 4; + } + + ``` + + - Info.Index.Detail + + | field | type | label | description | + | :---------: | :---------------------------- | :------- | :--------------------------------- | + | counts | Info.Index.Detail.CountsEntry | repeated | count infos for each agents | + | replica | uint32 | | index replica of vald cluster | + | live_agents | uint32 | | live agent replica of vald cluster | + + - Info.Index.Detail.CountsEntry + + | field | type | label | description | + | :---: | :--------------- | :---- | :---------- | + | key | string | | | + | value | Info.Index.Count | | | + + - Info.Index.Count + + | field | type | label | description | + | :---------: | :----- | :---- | :--------------------------- | + | stored | uint32 | | The stored index count. | + | uncommitted | uint32 | | The uncommitted index count. | + | indexing | bool | | The indexing index count. | + | saving | bool | | The saving index count. | + +## IndexStatistics RPC + +Represent the RPC to get the index statistics. + +### Input + +- the scheme of `payload.v1.Empty` + + ```rpc + message Empty { + // empty + } + + ``` + + - Empty + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.Statistics` + + ```rpc + message Info.Index.Statistics { + bool valid = 1; + int32 median_indegree = 2; + int32 median_outdegree = 3; + uint64 max_number_of_indegree = 4; + uint64 max_number_of_outdegree = 5; + uint64 min_number_of_indegree = 6; + uint64 min_number_of_outdegree = 7; + uint64 mode_indegree = 8; + uint64 mode_outdegree = 9; + uint64 nodes_skipped_for_10_edges = 10; + uint64 nodes_skipped_for_indegree_distance = 11; + uint64 number_of_edges = 12; + uint64 number_of_indexed_objects = 13; + uint64 number_of_nodes = 14; + uint64 number_of_nodes_without_edges = 15; + uint64 number_of_nodes_without_indegree = 16; + uint64 number_of_objects = 17; + uint64 number_of_removed_objects = 18; + uint64 size_of_object_repository = 19; + uint64 size_of_refinement_object_repository = 20; + double variance_of_indegree = 21; + double variance_of_outdegree = 22; + double mean_edge_length = 23; + double mean_edge_length_for_10_edges = 24; + double mean_indegree_distance_for_10_edges = 25; + double mean_number_of_edges_per_node = 26; + double c1_indegree = 27; + double c5_indegree = 28; + double c95_outdegree = 29; + double c99_outdegree = 30; + repeated int64 indegree_count = 31; + repeated uint64 outdegree_histogram = 32; + repeated uint64 indegree_histogram = 33; + } + + ``` + + - Info.Index.Statistics + + | field | type | label | description | + | :----------------------------------: | :----- | :------- | :---------- | + | valid | bool | | | + | median_indegree | int32 | | | + | median_outdegree | int32 | | | + | max_number_of_indegree | uint64 | | | + | max_number_of_outdegree | uint64 | | | + | min_number_of_indegree | uint64 | | | + | min_number_of_outdegree | uint64 | | | + | mode_indegree | uint64 | | | + | mode_outdegree | uint64 | | | + | nodes_skipped_for_10_edges | uint64 | | | + | 
nodes_skipped_for_indegree_distance | uint64 | | | + | number_of_edges | uint64 | | | + | number_of_indexed_objects | uint64 | | | + | number_of_nodes | uint64 | | | + | number_of_nodes_without_edges | uint64 | | | + | number_of_nodes_without_indegree | uint64 | | | + | number_of_objects | uint64 | | | + | number_of_removed_objects | uint64 | | | + | size_of_object_repository | uint64 | | | + | size_of_refinement_object_repository | uint64 | | | + | variance_of_indegree | double | | | + | variance_of_outdegree | double | | | + | mean_edge_length | double | | | + | mean_edge_length_for_10_edges | double | | | + | mean_indegree_distance_for_10_edges | double | | | + | mean_number_of_edges_per_node | double | | | + | c1_indegree | double | | | + | c5_indegree | double | | | + | c95_outdegree | double | | | + | c99_outdegree | double | | | + | indegree_count | int64 | repeated | | + | outdegree_histogram | uint64 | repeated | | + | indegree_histogram | uint64 | repeated | | + +## IndexStatisticsDetail RPC + +Represent the RPC to get the index statistics for each agents. + +### Input + +- the scheme of `payload.v1.Empty` + + ```rpc + message Empty { + // empty + } + + ``` + + - Empty + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.StatisticsDetail` + + ```rpc + message Info.Index.StatisticsDetail { + repeated Info.Index.StatisticsDetail.DetailsEntry details = 1; + } + + message Info.Index.StatisticsDetail.DetailsEntry { + string key = 1; + Info.Index.Statistics value = 2; + } + + message Info.Index.Statistics { + bool valid = 1; + int32 median_indegree = 2; + int32 median_outdegree = 3; + uint64 max_number_of_indegree = 4; + uint64 max_number_of_outdegree = 5; + uint64 min_number_of_indegree = 6; + uint64 min_number_of_outdegree = 7; + uint64 mode_indegree = 8; + uint64 mode_outdegree = 9; + uint64 nodes_skipped_for_10_edges = 10; + uint64 nodes_skipped_for_indegree_distance = 11; + uint64 number_of_edges = 12; + uint64 number_of_indexed_objects = 13; + uint64 number_of_nodes = 14; + uint64 number_of_nodes_without_edges = 15; + uint64 number_of_nodes_without_indegree = 16; + uint64 number_of_objects = 17; + uint64 number_of_removed_objects = 18; + uint64 size_of_object_repository = 19; + uint64 size_of_refinement_object_repository = 20; + double variance_of_indegree = 21; + double variance_of_outdegree = 22; + double mean_edge_length = 23; + double mean_edge_length_for_10_edges = 24; + double mean_indegree_distance_for_10_edges = 25; + double mean_number_of_edges_per_node = 26; + double c1_indegree = 27; + double c5_indegree = 28; + double c95_outdegree = 29; + double c99_outdegree = 30; + repeated int64 indegree_count = 31; + repeated uint64 outdegree_histogram = 32; + repeated uint64 indegree_histogram = 33; + } + + ``` + + - Info.Index.StatisticsDetail + + | field | type | label | description | + | :-----: | :--------------------------------------- | :------- | :-------------------------- | + | details | Info.Index.StatisticsDetail.DetailsEntry | repeated | count infos for each agents | + + - Info.Index.StatisticsDetail.DetailsEntry + + | field | type | label | description | + | :---: | :-------------------- | :---- | :---------- | + | key | string | | | + | value | Info.Index.Statistics | | | + + - Info.Index.Statistics + + | field | type | label | description | + | :----------------------------------: | :----- | :------- | :---------- | + | valid | bool | | | + | median_indegree | int32 | | | + | median_outdegree | int32 | | | + | max_number_of_indegree | uint64 | | | + | 
max_number_of_outdegree | uint64 | | | + | min_number_of_indegree | uint64 | | | + | min_number_of_outdegree | uint64 | | | + | mode_indegree | uint64 | | | + | mode_outdegree | uint64 | | | + | nodes_skipped_for_10_edges | uint64 | | | + | nodes_skipped_for_indegree_distance | uint64 | | | + | number_of_edges | uint64 | | | + | number_of_indexed_objects | uint64 | | | + | number_of_nodes | uint64 | | | + | number_of_nodes_without_edges | uint64 | | | + | number_of_nodes_without_indegree | uint64 | | | + | number_of_objects | uint64 | | | + | number_of_removed_objects | uint64 | | | + | size_of_object_repository | uint64 | | | + | size_of_refinement_object_repository | uint64 | | | + | variance_of_indegree | double | | | + | variance_of_outdegree | double | | | + | mean_edge_length | double | | | + | mean_edge_length_for_10_edges | double | | | + | mean_indegree_distance_for_10_edges | double | | | + | mean_number_of_edges_per_node | double | | | + | c1_indegree | double | | | + | c5_indegree | double | | | + | c95_outdegree | double | | | + | c99_outdegree | double | | | + | indegree_count | int64 | repeated | | + | outdegree_histogram | uint64 | repeated | | + | indegree_histogram | uint64 | repeated | | + +## IndexProperty RPC + +Represent the RPC to get the index property. + +### Input + +- the scheme of `payload.v1.Empty` + + ```rpc + message Empty { + // empty + } + + ``` + + - Empty + + empty + +### Output + +- the scheme of `payload.v1.Info.Index.PropertyDetail` + + ```rpc + message Info.Index.PropertyDetail { + repeated Info.Index.PropertyDetail.DetailsEntry details = 1; + } + + message Info.Index.PropertyDetail.DetailsEntry { + string key = 1; + Info.Index.Property value = 2; + } + + message Info.Index.Property { + int32 dimension = 1; + int32 thread_pool_size = 2; + string object_type = 3; + string distance_type = 4; + string index_type = 5; + string database_type = 6; + string object_alignment = 7; + int32 path_adjustment_interval = 8; + int32 graph_shared_memory_size = 9; + int32 tree_shared_memory_size = 10; + int32 object_shared_memory_size = 11; + int32 prefetch_offset = 12; + int32 prefetch_size = 13; + string accuracy_table = 14; + string search_type = 15; + float max_magnitude = 16; + int32 n_of_neighbors_for_insertion_order = 17; + float epsilon_for_insertion_order = 18; + string refinement_object_type = 19; + int32 truncation_threshold = 20; + int32 edge_size_for_creation = 21; + int32 edge_size_for_search = 22; + int32 edge_size_limit_for_creation = 23; + double insertion_radius_coefficient = 24; + int32 seed_size = 25; + string seed_type = 26; + int32 truncation_thread_pool_size = 27; + int32 batch_size_for_creation = 28; + string graph_type = 29; + int32 dynamic_edge_size_base = 30; + int32 dynamic_edge_size_rate = 31; + float build_time_limit = 32; + int32 outgoing_edge = 33; + int32 incoming_edge = 34; + } + + ``` + + - Info.Index.PropertyDetail + + | field | type | label | description | + | :-----: | :------------------------------------- | :------- | :---------- | + | details | Info.Index.PropertyDetail.DetailsEntry | repeated | | + + - Info.Index.PropertyDetail.DetailsEntry + + | field | type | label | description | + | :---: | :------------------ | :---- | :---------- | + | key | string | | | + | value | Info.Index.Property | | | + + - Info.Index.Property + + | field | type | label | description | + | :--------------------------------: | :----- | :---- | :---------- | + | dimension | int32 | | | + | thread_pool_size | int32 | | | + | object_type | string | | | 
+ | distance_type | string | | | + | index_type | string | | | + | database_type | string | | | + | object_alignment | string | | | + | path_adjustment_interval | int32 | | | + | graph_shared_memory_size | int32 | | | + | tree_shared_memory_size | int32 | | | + | object_shared_memory_size | int32 | | | + | prefetch_offset | int32 | | | + | prefetch_size | int32 | | | + | accuracy_table | string | | | + | search_type | string | | | + | max_magnitude | float | | | + | n_of_neighbors_for_insertion_order | int32 | | | + | epsilon_for_insertion_order | float | | | + | refinement_object_type | string | | | + | truncation_threshold | int32 | | | + | edge_size_for_creation | int32 | | | + | edge_size_for_search | int32 | | | + | edge_size_limit_for_creation | int32 | | | + | insertion_radius_coefficient | double | | | + | seed_size | int32 | | | + | seed_type | string | | | + | truncation_thread_pool_size | int32 | | | + | batch_size_for_creation | int32 | | | + | graph_type | string | | | + | dynamic_edge_size_base | int32 | | | + | dynamic_edge_size_rate | int32 | | | + | build_time_limit | float | | | + | outgoing_edge | int32 | | | + | incoming_edge | int32 | | | diff --git a/apis/docs/v1/insert.md b/apis/docs/v1/insert.md new file mode 100644 index 0000000000..40e91e84cc --- /dev/null +++ b/apis/docs/v1/insert.md @@ -0,0 +1,420 @@ +# Vald Insert APIs + +## Overview + +Insert Service is responsible for inserting new vectors into the `vald-agent`. + +```rpc +service Insert { + + rpc Insert(payload.v1.Insert.Request) returns (payload.v1.Object.Location) {} + rpc StreamInsert(payload.v1.Insert.Request) returns (payload.v1.Object.StreamLocation) {} + rpc MultiInsert(payload.v1.Insert.MultiRequest) returns (payload.v1.Object.Locations) {} + +} +``` + +## Insert RPC + +Inset RPC is the method to add a new single vector. + +### Input + +- the scheme of `payload.v1.Insert.Request` + + ```rpc + message Insert.Request { + Object.Vector vector = 1; + Insert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Insert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. 
| + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamInsert RPC + +StreamInsert RPC is the method to add new multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the insert request can be communicated in any order between client and server. +Each Insert request and response are independent. +It's the recommended method to insert a large number of vectors. + +### Input + +- the scheme of `payload.v1.Insert.Request` + + ```rpc + message Insert.Request { + Object.Vector vector = 1; + Insert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Insert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. 
+ +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiInsert RPC + +MultiInsert RPC is the method to add multiple new vectors in **1** request. + +
+gRPC has a message size limitation.
+Please take care that the size of the request does not exceed the limit.
+
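+To make the batch shape concrete, the minimal Go sketch below groups several vectors into one `Insert.MultiRequest`; the message fields are detailed under Input below. The package paths and the `vald.NewValdClient` constructor are assumptions based on the standard generated stubs.
+
+```go
+package example
+
+import (
+  "context"
+  "log"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+  "google.golang.org/grpc"
+)
+
+// multiInsert inserts a batch of vectors in one request.
+// Each vector's length must match the dimension configured on the Vald Agent.
+func multiInsert(ctx context.Context, conn *grpc.ClientConn, vectors map[string][]float32) error {
+  client := vald.NewValdClient(conn) // assumed aggregated client from vald-client-go
+
+  reqs := make([]*payload.Insert_Request, 0, len(vectors))
+  for id, vec := range vectors {
+    reqs = append(reqs, &payload.Insert_Request{
+      Vector: &payload.Object_Vector{Id: id, Vector: vec},
+      Config: &payload.Insert_Config{SkipStrictExistCheck: false},
+    })
+  }
+
+  locs, err := client.MultiInsert(ctx, &payload.Insert_MultiRequest{Requests: reqs})
+  if err != nil {
+    return err
+  }
+  log.Printf("inserted %d vectors", len(locs.GetLocations()))
+  return nil
+}
+```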
+ +### Input + +- the scheme of `payload.v1.Insert.MultiRequest` + + ```rpc + message Insert.MultiRequest { + repeated Insert.Request requests = 1; + } + + message Insert.Request { + Object.Vector vector = 1; + Insert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Insert.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :----------------------------------------- | + | requests | Insert.Request | repeated | Represent multiple insert request content. | + + - Insert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Insert.Config + + | field | type | label | description | + | :---------------------: | :------------ | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. 
+ +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | diff --git a/apis/docs/v1/mirror.md b/apis/docs/v1/mirror.md new file mode 100644 index 0000000000..cdb0d1cff8 --- /dev/null +++ b/apis/docs/v1/mirror.md @@ -0,0 +1,87 @@ +# Vald Mirror APIs + +## Overview + +Mirror Service is responsible for providing the `Register` interface for the Vald Mirror Gateway. + +```rpc +service Mirror { + + rpc Register(payload.v1.Mirror.Targets) returns (payload.v1.Mirror.Targets) {} + +} +``` + +## Register RPC + +Register RPC is the method to register other Vald Mirror Gateway targets. + +### Input + +- the scheme of `payload.v1.Mirror.Targets` + + ```rpc + message Mirror.Targets { + repeated Mirror.Target targets = 1; + } + + message Mirror.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Mirror.Targets + + | field | type | label | description | + | :-----: | :------------ | :------- | :------------------------------- | + | targets | Mirror.Target | repeated | The multiple target information. | + + - Mirror.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Mirror.Targets` + + ```rpc + message Mirror.Targets { + repeated Mirror.Target targets = 1; + } + + message Mirror.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Mirror.Targets + + | field | type | label | description | + | :-----: | :------------ | :------- | :------------------------------- | + | targets | Mirror.Target | repeated | The multiple target information. | + + - Mirror.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. 
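+
+For reference, registering another Mirror Gateway from Go could look like the minimal sketch below; the `mirror` import path and client constructor are assumptions based on the generated stubs for this service, and the host and port are placeholders. In practice this RPC is mostly exchanged between Mirror Gateways themselves.
+
+```go
+package example
+
+import (
+  "context"
+
+  "github.com/vdaas/vald-client-go/v1/mirror"
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "google.golang.org/grpc"
+)
+
+// registerMirrorTarget advertises another Mirror Gateway to the local one and
+// returns the gateway's Mirror.Targets response.
+func registerMirrorTarget(ctx context.Context, conn *grpc.ClientConn) (*payload.Mirror_Targets, error) {
+  client := mirror.NewMirrorClient(conn) // assumed generated constructor
+  return client.Register(ctx, &payload.Mirror_Targets{
+    Targets: []*payload.Mirror_Target{
+      {Host: "vald-mirror-gateway.other-cluster.example.com", Port: 8081},
+    },
+  })
+}
+```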
diff --git a/apis/docs/v1/object.md b/apis/docs/v1/object.md new file mode 100644 index 0000000000..aee45109cb --- /dev/null +++ b/apis/docs/v1/object.md @@ -0,0 +1,430 @@ +# Vald Object APIs + +## Overview + +Object Service is responsible for getting inserted vectors and checking whether vectors are inserted into the `vald-agent`. + +```rpc +service Object { + + rpc Exists(payload.v1.Object.ID) returns (payload.v1.Object.ID) {} + rpc GetObject(payload.v1.Object.VectorRequest) returns (payload.v1.Object.Vector) {} + rpc StreamGetObject(payload.v1.Object.VectorRequest) returns (payload.v1.Object.StreamVector) {} + rpc StreamListObject(payload.v1.Object.List.Request) returns (payload.v1.Object.List.Response) {} + rpc GetTimestamp(payload.v1.Object.TimestampRequest) returns (payload.v1.Object.Timestamp) {} + +} +``` + +## Exists RPC + +Exists RPC is the method to check that a vector exists in the `vald-agent`. + +### Input + +- the scheme of `payload.v1.Object.ID` + + ```rpc + message Object.ID { + string id = 1; + } + + ``` + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + +### Output + +- the scheme of `payload.v1.Object.ID` + + ```rpc + message Object.ID { + string id = 1; + } + + ``` + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## GetObject RPC + +GetObject RPC is the method to get the metadata of a vector inserted into the `vald-agent`. 
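+
+A minimal Go sketch of a single GetObject call follows; the request fields are detailed under Input below, and the client type and package paths are assumptions based on the standard generated stubs.
+
+```go
+package example
+
+import (
+  "context"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// getObject fetches the stored vector and its metadata for the given ID.
+func getObject(ctx context.Context, client vald.ObjectClient, id string) (*payload.Object_Vector, error) {
+  return client.GetObject(ctx, &payload.Object_VectorRequest{
+    Id: &payload.Object_ID{Id: id},
+    // Filters are optional; leave them unset unless an egress filter should post-process the result.
+  })
+}
+```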
+ +### Input + +- the scheme of `payload.v1.Object.VectorRequest` + + ```rpc + message Object.VectorRequest { + Object.ID id = 1; + Filter.Config filters = 2; + } + + message Object.ID { + string id = 1; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Object.VectorRequest + + | field | type | label | description | + | :-----: | :------------ | :---- | :--------------------------- | + | id | Object.ID | | The vector ID to be fetched. | + | filters | Filter.Config | | Filter configurations. | + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Vector` + + ```rpc + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + ``` + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamGetObject RPC + +StreamGetObject RPC is the method to get the metadata of multiple existing vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the GetObject request can be communicated in any order between client and server. +Each Upsert request and response are independent. + +### Input + +- the scheme of `payload.v1.Object.VectorRequest` + + ```rpc + message Object.VectorRequest { + Object.ID id = 1; + Filter.Config filters = 2; + } + + message Object.ID { + string id = 1; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Object.VectorRequest + + | field | type | label | description | + | :-----: | :------------ | :---- | :--------------------------- | + | id | Object.ID | | The vector ID to be fetched. | + | filters | Filter.Config | | Filter configurations. | + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.StreamVector` + + ```rpc + message Object.StreamVector { + Object.Vector vector = 1; + google.rpc.Status status = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + ``` + + - Object.StreamVector + + | field | type | label | description | + | :----: | :---------------- | :---- | :-------------------- | + | vector | Object.Vector | | The vector. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. 
| +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamListObject RPC + +A method to get all the vectors with server streaming + +### Input + +- the scheme of `payload.v1.Object.List.Request` + + ```rpc + message Object.List.Request { + // empty + } + + ``` + + - Object.List.Request + + empty + +### Output + +- the scheme of `payload.v1.Object.List.Response` + + ```rpc + message Object.List.Response { + Object.Vector vector = 1; + google.rpc.Status status = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + ``` + + - Object.List.Response + + | field | type | label | description | + | :----: | :---------------- | :---- | :-------------------- | + | vector | Object.Vector | | The vector | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + +### Status Code + +| code | description | +| :--: | :---------- | + +TODO + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +TODO + +## GetTimestamp RPC + +Represent the RPC to get the vector metadata. This RPC is mainly used for index correction process + +### Input + +- the scheme of `payload.v1.Object.TimestampRequest` + + ```rpc + message Object.TimestampRequest { + Object.ID id = 1; + } + + message Object.ID { + string id = 1; + } + + ``` + + - Object.TimestampRequest + + | field | type | label | description | + | :---: | :-------- | :---- | :--------------------------- | + | id | Object.ID | | The vector ID to be fetched. | + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + +### Output + +- the scheme of `payload.v1.Object.Timestamp` + + ```rpc + message Object.Timestamp { + string id = 1; + int64 timestamp = 2; + } + + ``` + + - Object.Timestamp + + | field | type | label | description | + | :-------: | :----- | :---- | :---------------------------------------------- | + | id | string | | The vector ID. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + +### Status Code + +| code | description | +| :--: | :---------- | + +TODO + +Please refer to [Response Status Code](../status.md) for more details. 
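+
+Because the response carries only the ID and the timestamp, GetTimestamp is the lighter call when the vector itself is not needed. A minimal Go sketch, with the client wired as in the GetObject example above:
+
+```go
+package example
+
+import (
+  "context"
+
+  "github.com/vdaas/vald-client-go/v1/payload"
+  "github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// getTimestamp returns the raw timestamp recorded for the vector with the given ID.
+func getTimestamp(ctx context.Context, client vald.ObjectClient, id string) (int64, error) {
+  ts, err := client.GetTimestamp(ctx, &payload.Object_TimestampRequest{
+    Id: &payload.Object_ID{Id: id},
+  })
+  if err != nil {
+    return 0, err
+  }
+  return ts.GetTimestamp(), nil
+}
+```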
+ +### Troubleshooting + +TODO diff --git a/apis/docs/v1/payload.md.tmpl b/apis/docs/v1/payload.md.tmpl new file mode 100644 index 0000000000..769f09b1e6 --- /dev/null +++ b/apis/docs/v1/payload.md.tmpl @@ -0,0 +1,2626 @@ +{{- define "scheme:payload.v1.Control" -}} +{{ template "_scheme:payload.v1.Control" }} +{{- end -}} +{{- define "field:payload.v1.Control" -}} +{{ template "_field:payload.v1.Control" }} +{{- end -}} +{{- define "scheme:payload.v1.Control.CreateIndexRequest" -}} +{{ template "_scheme:payload.v1.Control.CreateIndexRequest" }} +{{- end -}} +{{- define "field:payload.v1.Control.CreateIndexRequest" -}} +{{ template "_field:payload.v1.Control.CreateIndexRequest" }} +{{- end -}} +{{- define "scheme:payload.v1.Discoverer" -}} +{{ template "_scheme:payload.v1.Discoverer" }} +{{- end -}} +{{- define "field:payload.v1.Discoverer" -}} +{{ template "_field:payload.v1.Discoverer" }} +{{- end -}} +{{- define "scheme:payload.v1.Discoverer.Request" -}} +{{ template "_scheme:payload.v1.Discoverer.Request" }} +{{- end -}} +{{- define "field:payload.v1.Discoverer.Request" -}} +{{ template "_field:payload.v1.Discoverer.Request" }} +{{- end -}} +{{- define "scheme:payload.v1.Empty" -}} +{{ template "_scheme:payload.v1.Empty" }} +{{- end -}} +{{- define "field:payload.v1.Empty" -}} +{{ template "_field:payload.v1.Empty" }} +{{- end -}} +{{- define "scheme:payload.v1.Filter" -}} +{{ template "_scheme:payload.v1.Filter" }} +{{- end -}} +{{- define "field:payload.v1.Filter" -}} +{{ template "_field:payload.v1.Filter" }} +{{- end -}} +{{- define "scheme:payload.v1.Filter.Config" -}} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Filter.Config" -}} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Filter.Target" -}} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Filter.Target" -}} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Flush" -}} +{{ template "_scheme:payload.v1.Flush" }} +{{- end -}} +{{- define "field:payload.v1.Flush" -}} +{{ template "_field:payload.v1.Flush" }} +{{- end -}} +{{- define "scheme:payload.v1.Flush.Request" -}} +{{ template "_scheme:payload.v1.Flush.Request" }} +{{- end -}} +{{- define "field:payload.v1.Flush.Request" -}} +{{ template "_field:payload.v1.Flush.Request" }} +{{- end -}} +{{- define "scheme:payload.v1.Info" -}} +{{ template "_scheme:payload.v1.Info" }} +{{- end -}} +{{- define "field:payload.v1.Info" -}} +{{ template "_field:payload.v1.Info" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Annotations" -}} +{{ template "_scheme:payload.v1.Info.Annotations" }} +{{ template "_scheme:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Annotations" -}} +{{ template "_field:payload.v1.Info.Annotations" }} +{{ template "_field:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Annotations.AnnotationsEntry" -}} +{{ template "_scheme:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Annotations.AnnotationsEntry" -}} +{{ template "_field:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.CPU" -}} +{{ template "_scheme:payload.v1.Info.CPU" }} +{{- end -}} +{{- define "field:payload.v1.Info.CPU" -}} +{{ 
template "_field:payload.v1.Info.CPU" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.IPs" -}} +{{ template "_scheme:payload.v1.Info.IPs" }} +{{- end -}} +{{- define "field:payload.v1.Info.IPs" -}} +{{ template "_field:payload.v1.Info.IPs" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index" -}} +{{ template "_scheme:payload.v1.Info.Index" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index" -}} +{{ template "_field:payload.v1.Info.Index" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.Count" -}} +{{ template "_scheme:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.Count" -}} +{{ template "_field:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.Detail" -}} +{{ template "_scheme:payload.v1.Info.Index.Detail" }} +{{ template "_scheme:payload.v1.Info.Index.Detail.CountsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.Detail" -}} +{{ template "_field:payload.v1.Info.Index.Detail" }} +{{ template "_field:payload.v1.Info.Index.Detail.CountsEntry" }} +{{ template "_field:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.Detail.CountsEntry" -}} +{{ template "_scheme:payload.v1.Info.Index.Detail.CountsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.Detail.CountsEntry" -}} +{{ template "_field:payload.v1.Info.Index.Detail.CountsEntry" }} +{{ template "_field:payload.v1.Info.Index.Count" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.Property" -}} +{{ template "_scheme:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.Property" -}} +{{ template "_field:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.PropertyDetail" -}} +{{ template "_scheme:payload.v1.Info.Index.PropertyDetail" }} +{{ template "_scheme:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.PropertyDetail" -}} +{{ template "_field:payload.v1.Info.Index.PropertyDetail" }} +{{ template "_field:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} +{{ template "_field:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.PropertyDetail.DetailsEntry" -}} +{{ template "_scheme:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.PropertyDetail.DetailsEntry" -}} +{{ template "_field:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} +{{ template "_field:payload.v1.Info.Index.Property" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.Statistics" -}} +{{ template "_scheme:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.Statistics" -}} +{{ template "_field:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.StatisticsDetail" -}} +{{ template "_scheme:payload.v1.Info.Index.StatisticsDetail" }} +{{ template "_scheme:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.StatisticsDetail" -}} +{{ template "_field:payload.v1.Info.Index.StatisticsDetail" }} +{{ template 
"_field:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} +{{ template "_field:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" -}} +{{ template "_scheme:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} +{{ template "_scheme:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" -}} +{{ template "_field:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} +{{ template "_field:payload.v1.Info.Index.Statistics" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.UUID" -}} +{{ template "_scheme:payload.v1.Info.Index.UUID" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.UUID" -}} +{{ template "_field:payload.v1.Info.Index.UUID" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.UUID.Committed" -}} +{{ template "_scheme:payload.v1.Info.Index.UUID.Committed" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.UUID.Committed" -}} +{{ template "_field:payload.v1.Info.Index.UUID.Committed" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Index.UUID.Uncommitted" -}} +{{ template "_scheme:payload.v1.Info.Index.UUID.Uncommitted" }} +{{- end -}} +{{- define "field:payload.v1.Info.Index.UUID.Uncommitted" -}} +{{ template "_field:payload.v1.Info.Index.UUID.Uncommitted" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Labels" -}} +{{ template "_scheme:payload.v1.Info.Labels" }} +{{ template "_scheme:payload.v1.Info.Labels.LabelsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Labels" -}} +{{ template "_field:payload.v1.Info.Labels" }} +{{ template "_field:payload.v1.Info.Labels.LabelsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Labels.LabelsEntry" -}} +{{ template "_scheme:payload.v1.Info.Labels.LabelsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Labels.LabelsEntry" -}} +{{ template "_field:payload.v1.Info.Labels.LabelsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Memory" -}} +{{ template "_scheme:payload.v1.Info.Memory" }} +{{- end -}} +{{- define "field:payload.v1.Info.Memory" -}} +{{ template "_field:payload.v1.Info.Memory" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Node" -}} +{{ template "_scheme:payload.v1.Info.Node" }} +{{ template "_scheme:payload.v1.Info.CPU" }} +{{ template "_scheme:payload.v1.Info.Memory" }} +{{ template "_scheme:payload.v1.Info.Pods" }} +{{ template "_scheme:payload.v1.Info.Pod" }} +{{- end -}} +{{- define "field:payload.v1.Info.Node" -}} +{{ template "_field:payload.v1.Info.Node" }} +{{ template "_field:payload.v1.Info.CPU" }} +{{ template "_field:payload.v1.Info.Memory" }} +{{ template "_field:payload.v1.Info.Pods" }} +{{ template "_field:payload.v1.Info.Pod" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Nodes" -}} +{{ template "_scheme:payload.v1.Info.Nodes" }} +{{ template "_scheme:payload.v1.Info.Node" }} +{{ template "_scheme:payload.v1.Info.CPU" }} +{{ template "_scheme:payload.v1.Info.Memory" }} +{{ template "_scheme:payload.v1.Info.Pods" }} +{{ template "_scheme:payload.v1.Info.Pod" }} +{{- end -}} +{{- define "field:payload.v1.Info.Nodes" -}} +{{ template "_field:payload.v1.Info.Nodes" }} +{{ template "_field:payload.v1.Info.Node" }} +{{ template "_field:payload.v1.Info.CPU" }} +{{ template "_field:payload.v1.Info.Memory" }} +{{ template "_field:payload.v1.Info.Pods" }} +{{ template "_field:payload.v1.Info.Pod" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Pod" -}} +{{ template "_scheme:payload.v1.Info.Pod" }} +{{ 
template "_scheme:payload.v1.Info.CPU" }} +{{ template "_scheme:payload.v1.Info.Memory" }} +{{ template "_scheme:payload.v1.Info.Node" }} +{{- end -}} +{{- define "field:payload.v1.Info.Pod" -}} +{{ template "_field:payload.v1.Info.Pod" }} +{{ template "_field:payload.v1.Info.CPU" }} +{{ template "_field:payload.v1.Info.Memory" }} +{{ template "_field:payload.v1.Info.Node" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Pods" -}} +{{ template "_scheme:payload.v1.Info.Pods" }} +{{ template "_scheme:payload.v1.Info.Pod" }} +{{ template "_scheme:payload.v1.Info.CPU" }} +{{ template "_scheme:payload.v1.Info.Memory" }} +{{ template "_scheme:payload.v1.Info.Node" }} +{{- end -}} +{{- define "field:payload.v1.Info.Pods" -}} +{{ template "_field:payload.v1.Info.Pods" }} +{{ template "_field:payload.v1.Info.Pod" }} +{{ template "_field:payload.v1.Info.CPU" }} +{{ template "_field:payload.v1.Info.Memory" }} +{{ template "_field:payload.v1.Info.Node" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Service" -}} +{{ template "_scheme:payload.v1.Info.Service" }} +{{ template "_scheme:payload.v1.Info.ServicePort" }} +{{ template "_scheme:payload.v1.Info.Labels" }} +{{ template "_scheme:payload.v1.Info.Annotations" }} +{{ template "_scheme:payload.v1.Info.Labels.LabelsEntry" }} +{{ template "_scheme:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Service" -}} +{{ template "_field:payload.v1.Info.Service" }} +{{ template "_field:payload.v1.Info.ServicePort" }} +{{ template "_field:payload.v1.Info.Labels" }} +{{ template "_field:payload.v1.Info.Annotations" }} +{{ template "_field:payload.v1.Info.Labels.LabelsEntry" }} +{{ template "_field:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.ServicePort" -}} +{{ template "_scheme:payload.v1.Info.ServicePort" }} +{{- end -}} +{{- define "field:payload.v1.Info.ServicePort" -}} +{{ template "_field:payload.v1.Info.ServicePort" }} +{{- end -}} +{{- define "scheme:payload.v1.Info.Services" -}} +{{ template "_scheme:payload.v1.Info.Services" }} +{{ template "_scheme:payload.v1.Info.Service" }} +{{ template "_scheme:payload.v1.Info.ServicePort" }} +{{ template "_scheme:payload.v1.Info.Labels" }} +{{ template "_scheme:payload.v1.Info.Annotations" }} +{{ template "_scheme:payload.v1.Info.Labels.LabelsEntry" }} +{{ template "_scheme:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "field:payload.v1.Info.Services" -}} +{{ template "_field:payload.v1.Info.Services" }} +{{ template "_field:payload.v1.Info.Service" }} +{{ template "_field:payload.v1.Info.ServicePort" }} +{{ template "_field:payload.v1.Info.Labels" }} +{{ template "_field:payload.v1.Info.Annotations" }} +{{ template "_field:payload.v1.Info.Labels.LabelsEntry" }} +{{ template "_field:payload.v1.Info.Annotations.AnnotationsEntry" }} +{{- end -}} +{{- define "scheme:payload.v1.Insert" -}} +{{ template "_scheme:payload.v1.Insert" }} +{{- end -}} +{{- define "field:payload.v1.Insert" -}} +{{ template "_field:payload.v1.Insert" }} +{{- end -}} +{{- define "scheme:payload.v1.Insert.Config" -}} +{{ template "_scheme:payload.v1.Insert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Insert.Config" -}} +{{ template "_field:payload.v1.Insert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define 
"scheme:payload.v1.Insert.MultiObjectRequest" -}} +{{ template "_scheme:payload.v1.Insert.MultiObjectRequest" }} +{{ template "_scheme:payload.v1.Insert.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Insert.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Insert.MultiObjectRequest" -}} +{{ template "_field:payload.v1.Insert.MultiObjectRequest" }} +{{ template "_field:payload.v1.Insert.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Insert.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Insert.MultiRequest" -}} +{{ template "_scheme:payload.v1.Insert.MultiRequest" }} +{{ template "_scheme:payload.v1.Insert.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Insert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Insert.MultiRequest" -}} +{{ template "_field:payload.v1.Insert.MultiRequest" }} +{{ template "_field:payload.v1.Insert.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Insert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Insert.ObjectRequest" -}} +{{ template "_scheme:payload.v1.Insert.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Insert.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Insert.ObjectRequest" -}} +{{ template "_field:payload.v1.Insert.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Insert.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Insert.Request" -}} +{{ template "_scheme:payload.v1.Insert.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Insert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Insert.Request" -}} +{{ template "_field:payload.v1.Insert.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Insert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Meta" -}} +{{ template "_scheme:payload.v1.Meta" }} +{{- end -}} +{{- define "field:payload.v1.Meta" -}} +{{ template "_field:payload.v1.Meta" }} +{{- end -}} +{{- define "scheme:payload.v1.Meta.Key" -}} +{{ template "_scheme:payload.v1.Meta.Key" }} +{{- end -}} +{{- define "field:payload.v1.Meta.Key" -}} +{{ template "_field:payload.v1.Meta.Key" }} +{{- end -}} +{{- define "scheme:payload.v1.Meta.KeyValue" -}} +{{ template "_scheme:payload.v1.Meta.KeyValue" }} +{{ template "_scheme:payload.v1.Meta.Key" }} +{{ template "_scheme:payload.v1.Meta.Value" }} +{{- end -}} +{{- define "field:payload.v1.Meta.KeyValue" -}} +{{ template "_field:payload.v1.Meta.KeyValue" }} +{{ template 
"_field:payload.v1.Meta.Key" }} +{{ template "_field:payload.v1.Meta.Value" }} +{{- end -}} +{{- define "scheme:payload.v1.Meta.Value" -}} +{{ template "_scheme:payload.v1.Meta.Value" }} +{{- end -}} +{{- define "field:payload.v1.Meta.Value" -}} +{{ template "_field:payload.v1.Meta.Value" }} +{{- end -}} +{{- define "scheme:payload.v1.Mirror" -}} +{{ template "_scheme:payload.v1.Mirror" }} +{{- end -}} +{{- define "field:payload.v1.Mirror" -}} +{{ template "_field:payload.v1.Mirror" }} +{{- end -}} +{{- define "scheme:payload.v1.Mirror.Target" -}} +{{ template "_scheme:payload.v1.Mirror.Target" }} +{{- end -}} +{{- define "field:payload.v1.Mirror.Target" -}} +{{ template "_field:payload.v1.Mirror.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Mirror.Targets" -}} +{{ template "_scheme:payload.v1.Mirror.Targets" }} +{{ template "_scheme:payload.v1.Mirror.Target" }} +{{- end -}} +{{- define "field:payload.v1.Mirror.Targets" -}} +{{ template "_field:payload.v1.Mirror.Targets" }} +{{ template "_field:payload.v1.Mirror.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Object" -}} +{{ template "_scheme:payload.v1.Object" }} +{{- end -}} +{{- define "field:payload.v1.Object" -}} +{{ template "_field:payload.v1.Object" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Blob" -}} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{- end -}} +{{- define "field:payload.v1.Object.Blob" -}} +{{ template "_field:payload.v1.Object.Blob" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Distance" -}} +{{ template "_scheme:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "field:payload.v1.Object.Distance" -}} +{{ template "_field:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.ID" -}} +{{ template "_scheme:payload.v1.Object.ID" }} +{{- end -}} +{{- define "field:payload.v1.Object.ID" -}} +{{ template "_field:payload.v1.Object.ID" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.IDs" -}} +{{ template "_scheme:payload.v1.Object.IDs" }} +{{- end -}} +{{- define "field:payload.v1.Object.IDs" -}} +{{ template "_field:payload.v1.Object.IDs" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.List" -}} +{{ template "_scheme:payload.v1.Object.List" }} +{{- end -}} +{{- define "field:payload.v1.Object.List" -}} +{{ template "_field:payload.v1.Object.List" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.List.Request" -}} +{{ template "_scheme:payload.v1.Object.List.Request" }} +{{- end -}} +{{- define "field:payload.v1.Object.List.Request" -}} +{{ template "_field:payload.v1.Object.List.Request" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.List.Response" -}} +{{ template "_scheme:payload.v1.Object.List.Response" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "field:payload.v1.Object.List.Response" -}} +{{ template "_field:payload.v1.Object.List.Response" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Location" -}} +{{ template "_scheme:payload.v1.Object.Location" }} +{{- end -}} +{{- define "field:payload.v1.Object.Location" -}} +{{ template "_field:payload.v1.Object.Location" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Locations" -}} +{{ template "_scheme:payload.v1.Object.Locations" }} +{{ template "_scheme:payload.v1.Object.Location" }} +{{- end -}} +{{- define "field:payload.v1.Object.Locations" -}} +{{ template "_field:payload.v1.Object.Locations" }} +{{ template "_field:payload.v1.Object.Location" }} +{{- end -}} +{{- define 
"scheme:payload.v1.Object.ReshapeVector" -}} +{{ template "_scheme:payload.v1.Object.ReshapeVector" }} +{{- end -}} +{{- define "field:payload.v1.Object.ReshapeVector" -}} +{{ template "_field:payload.v1.Object.ReshapeVector" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.StreamBlob" -}} +{{ template "_scheme:payload.v1.Object.StreamBlob" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{- end -}} +{{- define "field:payload.v1.Object.StreamBlob" -}} +{{ template "_field:payload.v1.Object.StreamBlob" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.StreamDistance" -}} +{{ template "_scheme:payload.v1.Object.StreamDistance" }} +{{ template "_scheme:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "field:payload.v1.Object.StreamDistance" -}} +{{ template "_field:payload.v1.Object.StreamDistance" }} +{{ template "_field:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.StreamLocation" -}} +{{ template "_scheme:payload.v1.Object.StreamLocation" }} +{{ template "_scheme:payload.v1.Object.Location" }} +{{- end -}} +{{- define "field:payload.v1.Object.StreamLocation" -}} +{{ template "_field:payload.v1.Object.StreamLocation" }} +{{ template "_field:payload.v1.Object.Location" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.StreamVector" -}} +{{ template "_scheme:payload.v1.Object.StreamVector" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "field:payload.v1.Object.StreamVector" -}} +{{ template "_field:payload.v1.Object.StreamVector" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Timestamp" -}} +{{ template "_scheme:payload.v1.Object.Timestamp" }} +{{- end -}} +{{- define "field:payload.v1.Object.Timestamp" -}} +{{ template "_field:payload.v1.Object.Timestamp" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.TimestampRequest" -}} +{{ template "_scheme:payload.v1.Object.TimestampRequest" }} +{{ template "_scheme:payload.v1.Object.ID" }} +{{- end -}} +{{- define "field:payload.v1.Object.TimestampRequest" -}} +{{ template "_field:payload.v1.Object.TimestampRequest" }} +{{ template "_field:payload.v1.Object.ID" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Vector" -}} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "field:payload.v1.Object.Vector" -}} +{{ template "_field:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.VectorRequest" -}} +{{ template "_scheme:payload.v1.Object.VectorRequest" }} +{{ template "_scheme:payload.v1.Object.ID" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Object.VectorRequest" -}} +{{ template "_field:payload.v1.Object.VectorRequest" }} +{{ template "_field:payload.v1.Object.ID" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Object.Vectors" -}} +{{ template "_scheme:payload.v1.Object.Vectors" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "field:payload.v1.Object.Vectors" -}} +{{ template "_field:payload.v1.Object.Vectors" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{- end -}} +{{- define "scheme:payload.v1.Remove" -}} +{{ template "_scheme:payload.v1.Remove" }} +{{- end -}} +{{- define "field:payload.v1.Remove" -}} +{{ template "_field:payload.v1.Remove" }} +{{- end 
-}} +{{- define "scheme:payload.v1.Remove.Config" -}} +{{ template "_scheme:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "field:payload.v1.Remove.Config" -}} +{{ template "_field:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Remove.MultiRequest" -}} +{{ template "_scheme:payload.v1.Remove.MultiRequest" }} +{{ template "_scheme:payload.v1.Remove.Request" }} +{{ template "_scheme:payload.v1.Object.ID" }} +{{ template "_scheme:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "field:payload.v1.Remove.MultiRequest" -}} +{{ template "_field:payload.v1.Remove.MultiRequest" }} +{{ template "_field:payload.v1.Remove.Request" }} +{{ template "_field:payload.v1.Object.ID" }} +{{ template "_field:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Remove.Request" -}} +{{ template "_scheme:payload.v1.Remove.Request" }} +{{ template "_scheme:payload.v1.Object.ID" }} +{{ template "_scheme:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "field:payload.v1.Remove.Request" -}} +{{ template "_field:payload.v1.Remove.Request" }} +{{ template "_field:payload.v1.Object.ID" }} +{{ template "_field:payload.v1.Remove.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Remove.Timestamp" -}} +{{ template "_scheme:payload.v1.Remove.Timestamp" }} +{{ template "_scheme:payload.v1.Remove.Timestamp.Operator" }} +{{- end -}} +{{- define "field:payload.v1.Remove.Timestamp" -}} +{{ template "_field:payload.v1.Remove.Timestamp" }} +{{ template "_field:payload.v1.Remove.Timestamp.Operator" }} +{{- end -}} +{{- define "scheme:payload.v1.Remove.TimestampRequest" -}} +{{ template "_scheme:payload.v1.Remove.TimestampRequest" }} +{{ template "_scheme:payload.v1.Remove.Timestamp" }} +{{ template "_scheme:payload.v1.Remove.Timestamp.Operator" }} +{{- end -}} +{{- define "field:payload.v1.Remove.TimestampRequest" -}} +{{ template "_field:payload.v1.Remove.TimestampRequest" }} +{{ template "_field:payload.v1.Remove.Timestamp" }} +{{ template "_field:payload.v1.Remove.Timestamp.Operator" }} +{{- end -}} +{{- define "scheme:payload.v1.Search" -}} +{{ template "_scheme:payload.v1.Search" }} +{{- end -}} +{{- define "field:payload.v1.Search" -}} +{{ template "_field:payload.v1.Search" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.Config" -}} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Search.Config" -}} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.IDRequest" -}} +{{ template "_scheme:payload.v1.Search.IDRequest" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Search.IDRequest" -}} +{{ template "_field:payload.v1.Search.IDRequest" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.MultiIDRequest" -}} +{{ 
template "_scheme:payload.v1.Search.MultiIDRequest" }} +{{ template "_scheme:payload.v1.Search.IDRequest" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Search.MultiIDRequest" -}} +{{ template "_field:payload.v1.Search.MultiIDRequest" }} +{{ template "_field:payload.v1.Search.IDRequest" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.MultiObjectRequest" -}} +{{ template "_scheme:payload.v1.Search.MultiObjectRequest" }} +{{ template "_scheme:payload.v1.Search.ObjectRequest" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{- end -}} +{{- define "field:payload.v1.Search.MultiObjectRequest" -}} +{{ template "_field:payload.v1.Search.MultiObjectRequest" }} +{{ template "_field:payload.v1.Search.ObjectRequest" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.MultiRequest" -}} +{{ template "_scheme:payload.v1.Search.MultiRequest" }} +{{ template "_scheme:payload.v1.Search.Request" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Search.MultiRequest" -}} +{{ template "_field:payload.v1.Search.MultiRequest" }} +{{ template "_field:payload.v1.Search.Request" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.ObjectRequest" -}} +{{ template "_scheme:payload.v1.Search.ObjectRequest" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{- end -}} +{{- define "field:payload.v1.Search.ObjectRequest" -}} +{{ template "_field:payload.v1.Search.ObjectRequest" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.Request" -}} +{{ template "_scheme:payload.v1.Search.Request" }} +{{ template "_scheme:payload.v1.Search.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Search.Request" -}} +{{ template "_field:payload.v1.Search.Request" }} +{{ template "_field:payload.v1.Search.Config" }} +{{ template 
"_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Search.AggregationAlgorithm" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.Response" -}} +{{ template "_scheme:payload.v1.Search.Response" }} +{{ template "_scheme:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "field:payload.v1.Search.Response" -}} +{{ template "_field:payload.v1.Search.Response" }} +{{ template "_field:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.Responses" -}} +{{ template "_scheme:payload.v1.Search.Responses" }} +{{ template "_scheme:payload.v1.Search.Response" }} +{{ template "_scheme:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "field:payload.v1.Search.Responses" -}} +{{ template "_field:payload.v1.Search.Responses" }} +{{ template "_field:payload.v1.Search.Response" }} +{{ template "_field:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "scheme:payload.v1.Search.StreamResponse" -}} +{{ template "_scheme:payload.v1.Search.StreamResponse" }} +{{ template "_scheme:payload.v1.Search.Response" }} +{{ template "_scheme:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "field:payload.v1.Search.StreamResponse" -}} +{{ template "_field:payload.v1.Search.StreamResponse" }} +{{ template "_field:payload.v1.Search.Response" }} +{{ template "_field:payload.v1.Object.Distance" }} +{{- end -}} +{{- define "scheme:payload.v1.Update" -}} +{{ template "_scheme:payload.v1.Update" }} +{{- end -}} +{{- define "field:payload.v1.Update" -}} +{{ template "_field:payload.v1.Update" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.Config" -}} +{{ template "_scheme:payload.v1.Update.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Update.Config" -}} +{{ template "_field:payload.v1.Update.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.MultiObjectRequest" -}} +{{ template "_scheme:payload.v1.Update.MultiObjectRequest" }} +{{ template "_scheme:payload.v1.Update.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Update.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Update.MultiObjectRequest" -}} +{{ template "_field:payload.v1.Update.MultiObjectRequest" }} +{{ template "_field:payload.v1.Update.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Update.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.MultiRequest" -}} +{{ template "_scheme:payload.v1.Update.MultiRequest" }} +{{ template "_scheme:payload.v1.Update.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Update.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Update.MultiRequest" -}} +{{ template "_field:payload.v1.Update.MultiRequest" }} +{{ template "_field:payload.v1.Update.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Update.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template 
"_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.ObjectRequest" -}} +{{ template "_scheme:payload.v1.Update.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Update.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Update.ObjectRequest" -}} +{{ template "_field:payload.v1.Update.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Update.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.Request" -}} +{{ template "_scheme:payload.v1.Update.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Update.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Update.Request" -}} +{{ template "_field:payload.v1.Update.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Update.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Update.TimestampRequest" -}} +{{ template "_scheme:payload.v1.Update.TimestampRequest" }} +{{- end -}} +{{- define "field:payload.v1.Update.TimestampRequest" -}} +{{ template "_field:payload.v1.Update.TimestampRequest" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert" -}} +{{ template "_scheme:payload.v1.Upsert" }} +{{- end -}} +{{- define "field:payload.v1.Upsert" -}} +{{ template "_field:payload.v1.Upsert" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert.Config" -}} +{{ template "_scheme:payload.v1.Upsert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Upsert.Config" -}} +{{ template "_field:payload.v1.Upsert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert.MultiObjectRequest" -}} +{{ template "_scheme:payload.v1.Upsert.MultiObjectRequest" }} +{{ template "_scheme:payload.v1.Upsert.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Upsert.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Upsert.MultiObjectRequest" -}} +{{ template "_field:payload.v1.Upsert.MultiObjectRequest" }} +{{ template "_field:payload.v1.Upsert.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Upsert.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert.MultiRequest" -}} +{{ template "_scheme:payload.v1.Upsert.MultiRequest" }} +{{ template "_scheme:payload.v1.Upsert.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Upsert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Upsert.MultiRequest" -}} +{{ template "_field:payload.v1.Upsert.MultiRequest" }} +{{ template 
"_field:payload.v1.Upsert.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Upsert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert.ObjectRequest" -}} +{{ template "_scheme:payload.v1.Upsert.ObjectRequest" }} +{{ template "_scheme:payload.v1.Object.Blob" }} +{{ template "_scheme:payload.v1.Upsert.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "field:payload.v1.Upsert.ObjectRequest" -}} +{{ template "_field:payload.v1.Upsert.ObjectRequest" }} +{{ template "_field:payload.v1.Object.Blob" }} +{{ template "_field:payload.v1.Upsert.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{- end -}} +{{- define "scheme:payload.v1.Upsert.Request" -}} +{{ template "_scheme:payload.v1.Upsert.Request" }} +{{ template "_scheme:payload.v1.Object.Vector" }} +{{ template "_scheme:payload.v1.Upsert.Config" }} +{{ template "_scheme:payload.v1.Filter.Config" }} +{{ template "_scheme:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "field:payload.v1.Upsert.Request" -}} +{{ template "_field:payload.v1.Upsert.Request" }} +{{ template "_field:payload.v1.Object.Vector" }} +{{ template "_field:payload.v1.Upsert.Config" }} +{{ template "_field:payload.v1.Filter.Config" }} +{{ template "_field:payload.v1.Filter.Target" }} +{{- end -}} +{{- define "_scheme:payload.v1.Control" }} + message Control { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Control" }} + - Control + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Control.CreateIndexRequest" }} + message Control.CreateIndexRequest { + uint32 pool_size = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Control.CreateIndexRequest" }} + - Control.CreateIndexRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | pool_size | uint32 | | The pool size of the create index operation. | +{{- end -}} + +{{- define "_scheme:payload.v1.Discoverer" }} + message Discoverer { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Discoverer" }} + - Discoverer + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Discoverer.Request" }} + message Discoverer.Request { + string name = 1; + string namespace = 2; + string node = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Discoverer.Request" }} + - Discoverer.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | name | string | | The agent name to be discovered. | + | namespace | string | | The namespace to be discovered. | + | node | string | | The node to be discovered. | +{{- end -}} + +{{- define "_scheme:payload.v1.Empty" }} + message Empty { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Empty" }} + - Empty + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Filter" }} + message Filter { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Filter" }} + - Filter + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Filter.Config" }} + message Filter.Config { + repeated Filter.Target targets = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Filter.Config" }} + - Filter.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Filter.Target" }} + message Filter.Target { + string host = 1; + uint32 port = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Filter.Target" }} + - Filter.Target + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | +{{- end -}} + +{{- define "_scheme:payload.v1.Flush" }} + message Flush { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Flush" }} + - Flush + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Flush.Request" }} + message Flush.Request { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Flush.Request" }} + - Flush.Request + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Info" }} + message Info { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Info" }} + - Info + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Annotations" }} + message Info.Annotations { + repeated Info.Annotations.AnnotationsEntry annotations = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Annotations" }} + - Info.Annotations + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | annotations | Info.Annotations.AnnotationsEntry | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Annotations.AnnotationsEntry" }} + message Info.Annotations.AnnotationsEntry { + string key = 1; + string value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Annotations.AnnotationsEntry" }} + - Info.Annotations.AnnotationsEntry + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | + | value | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.CPU" }} + message Info.CPU { + double limit = 1; + double request = 2; + double usage = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.CPU" }} + - Info.CPU + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | limit | double | | The CPU resource limit. | + | request | double | | The CPU resource requested. | + | usage | double | | The CPU usage. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.IPs" }} + message Info.IPs { + repeated string ip = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.IPs" }} + - Info.IPs + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | ip | string | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index" }} + message Info.Index { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index" }} + - Info.Index + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.Count" }} + message Info.Index.Count { + uint32 stored = 1; + uint32 uncommitted = 2; + bool indexing = 3; + bool saving = 4; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.Count" }} + - Info.Index.Count + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | stored | uint32 | | The stored index count. | + | uncommitted | uint32 | | The uncommitted index count. | + | indexing | bool | | The indexing index count. | + | saving | bool | | The saving index count. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.Detail" }} + message Info.Index.Detail { + repeated Info.Index.Detail.CountsEntry counts = 1; + uint32 replica = 2; + uint32 live_agents = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.Detail" }} + - Info.Index.Detail + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | counts | Info.Index.Detail.CountsEntry | repeated | count infos for each agents | + | replica | uint32 | | index replica of vald cluster | + | live_agents | uint32 | | live agent replica of vald cluster | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.Detail.CountsEntry" }} + message Info.Index.Detail.CountsEntry { + string key = 1; + Info.Index.Count value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.Detail.CountsEntry" }} + - Info.Index.Detail.CountsEntry + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | + | value | Info.Index.Count | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.Property" }} + message Info.Index.Property { + int32 dimension = 1; + int32 thread_pool_size = 2; + string object_type = 3; + string distance_type = 4; + string index_type = 5; + string database_type = 6; + string object_alignment = 7; + int32 path_adjustment_interval = 8; + int32 graph_shared_memory_size = 9; + int32 tree_shared_memory_size = 10; + int32 object_shared_memory_size = 11; + int32 prefetch_offset = 12; + int32 prefetch_size = 13; + string accuracy_table = 14; + string search_type = 15; + float max_magnitude = 16; + int32 n_of_neighbors_for_insertion_order = 17; + float epsilon_for_insertion_order = 18; + string refinement_object_type = 19; + int32 truncation_threshold = 20; + int32 edge_size_for_creation = 21; + int32 edge_size_for_search = 22; + int32 edge_size_limit_for_creation = 23; + double insertion_radius_coefficient = 24; + int32 seed_size = 25; + string seed_type = 26; + int32 truncation_thread_pool_size = 27; + int32 batch_size_for_creation = 28; + string graph_type = 29; + int32 dynamic_edge_size_base = 30; + int32 dynamic_edge_size_rate = 31; + float build_time_limit = 32; + int32 outgoing_edge = 33; + int32 incoming_edge = 34; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.Property" }} + - Info.Index.Property + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | dimension | int32 | | | + | thread_pool_size | int32 | | | + | object_type | string | | | + | distance_type | string | | | + | index_type | string | | | + | database_type | string | | | + | object_alignment | string | | | + | path_adjustment_interval | int32 | | | + | graph_shared_memory_size | int32 | | | + | tree_shared_memory_size | int32 | | | + | object_shared_memory_size | int32 | | | + | prefetch_offset | int32 | | | + | prefetch_size | int32 | | | + | accuracy_table | string | | | + | search_type | string | | | + | max_magnitude | float | | | + | n_of_neighbors_for_insertion_order | int32 | | | + | epsilon_for_insertion_order | float | | | + | refinement_object_type | string | | | + | truncation_threshold | int32 | | | + | edge_size_for_creation | int32 | | | + | edge_size_for_search | int32 | | | + | edge_size_limit_for_creation | int32 | | | + | insertion_radius_coefficient | double | | | + | seed_size | int32 | | | + | seed_type | string | | | + | truncation_thread_pool_size | int32 | | | + | batch_size_for_creation | int32 | | | + | graph_type | string | | | + | dynamic_edge_size_base | 
int32 | | | + | dynamic_edge_size_rate | int32 | | | + | build_time_limit | float | | | + | outgoing_edge | int32 | | | + | incoming_edge | int32 | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.PropertyDetail" }} + message Info.Index.PropertyDetail { + repeated Info.Index.PropertyDetail.DetailsEntry details = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.PropertyDetail" }} + - Info.Index.PropertyDetail + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | details | Info.Index.PropertyDetail.DetailsEntry | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} + message Info.Index.PropertyDetail.DetailsEntry { + string key = 1; + Info.Index.Property value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.PropertyDetail.DetailsEntry" }} + - Info.Index.PropertyDetail.DetailsEntry + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | + | value | Info.Index.Property | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.Statistics" }} + message Info.Index.Statistics { + bool valid = 1; + int32 median_indegree = 2; + int32 median_outdegree = 3; + uint64 max_number_of_indegree = 4; + uint64 max_number_of_outdegree = 5; + uint64 min_number_of_indegree = 6; + uint64 min_number_of_outdegree = 7; + uint64 mode_indegree = 8; + uint64 mode_outdegree = 9; + uint64 nodes_skipped_for_10_edges = 10; + uint64 nodes_skipped_for_indegree_distance = 11; + uint64 number_of_edges = 12; + uint64 number_of_indexed_objects = 13; + uint64 number_of_nodes = 14; + uint64 number_of_nodes_without_edges = 15; + uint64 number_of_nodes_without_indegree = 16; + uint64 number_of_objects = 17; + uint64 number_of_removed_objects = 18; + uint64 size_of_object_repository = 19; + uint64 size_of_refinement_object_repository = 20; + double variance_of_indegree = 21; + double variance_of_outdegree = 22; + double mean_edge_length = 23; + double mean_edge_length_for_10_edges = 24; + double mean_indegree_distance_for_10_edges = 25; + double mean_number_of_edges_per_node = 26; + double c1_indegree = 27; + double c5_indegree = 28; + double c95_outdegree = 29; + double c99_outdegree = 30; + repeated int64 indegree_count = 31; + repeated uint64 outdegree_histogram = 32; + repeated uint64 indegree_histogram = 33; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.Statistics" }} + - Info.Index.Statistics + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | valid | bool | | | + | median_indegree | int32 | | | + | median_outdegree | int32 | | | + | max_number_of_indegree | uint64 | | | + | max_number_of_outdegree | uint64 | | | + | min_number_of_indegree | uint64 | | | + | min_number_of_outdegree | uint64 | | | + | mode_indegree | uint64 | | | + | mode_outdegree | uint64 | | | + | nodes_skipped_for_10_edges | uint64 | | | + | nodes_skipped_for_indegree_distance | uint64 | | | + | number_of_edges | uint64 | | | + | number_of_indexed_objects | uint64 | | | + | number_of_nodes | uint64 | | | + | number_of_nodes_without_edges | uint64 | | | + | number_of_nodes_without_indegree | uint64 | | | + | number_of_objects | uint64 | | | + | number_of_removed_objects | uint64 | | | + | size_of_object_repository | uint64 | | | + | size_of_refinement_object_repository | uint64 | | | + | variance_of_indegree | double | | | + | variance_of_outdegree | double | | | + | mean_edge_length | double | | | + | 
mean_edge_length_for_10_edges | double | | | + | mean_indegree_distance_for_10_edges | double | | | + | mean_number_of_edges_per_node | double | | | + | c1_indegree | double | | | + | c5_indegree | double | | | + | c95_outdegree | double | | | + | c99_outdegree | double | | | + | indegree_count | int64 | repeated | | + | outdegree_histogram | uint64 | repeated | | + | indegree_histogram | uint64 | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.StatisticsDetail" }} + message Info.Index.StatisticsDetail { + repeated Info.Index.StatisticsDetail.DetailsEntry details = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.StatisticsDetail" }} + - Info.Index.StatisticsDetail + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | details | Info.Index.StatisticsDetail.DetailsEntry | repeated | count infos for each agents | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} + message Info.Index.StatisticsDetail.DetailsEntry { + string key = 1; + Info.Index.Statistics value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.StatisticsDetail.DetailsEntry" }} + - Info.Index.StatisticsDetail.DetailsEntry + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | + | value | Info.Index.Statistics | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.UUID" }} + message Info.Index.UUID { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.UUID" }} + - Info.Index.UUID + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.UUID.Committed" }} + message Info.Index.UUID.Committed { + string uuid = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.UUID.Committed" }} + - Info.Index.UUID.Committed + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | uuid | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Index.UUID.Uncommitted" }} + message Info.Index.UUID.Uncommitted { + string uuid = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Index.UUID.Uncommitted" }} + - Info.Index.UUID.Uncommitted + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | uuid | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Labels" }} + message Info.Labels { + repeated Info.Labels.LabelsEntry labels = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Labels" }} + - Info.Labels + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | labels | Info.Labels.LabelsEntry | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Labels.LabelsEntry" }} + message Info.Labels.LabelsEntry { + string key = 1; + string value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Labels.LabelsEntry" }} + - Info.Labels.LabelsEntry + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | + | value | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Memory" }} + message Info.Memory { + double limit = 1; + double request = 2; + double usage = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Memory" }} + - Info.Memory + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | limit | double | | The memory limit. | + | request | double | | The memory requested. | + | usage | double | | The memory usage. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Node" }} + message Info.Node { + string name = 1; + string internal_addr = 2; + string external_addr = 3; + Info.CPU cpu = 4; + Info.Memory memory = 5; + Info.Pods Pods = 6; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Node" }} + - Info.Node + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | name | string | | The name of the node. | + | internal_addr | string | | The internal IP address of the node. | + | external_addr | string | | The external IP address of the node. | + | cpu | Info.CPU | | The CPU information of the node. | + | memory | Info.Memory | | The memory information of the node. | + | Pods | Info.Pods | | The pod information of the node. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Nodes" }} + message Info.Nodes { + repeated Info.Node nodes = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Nodes" }} + - Info.Nodes + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | nodes | Info.Node | repeated | The multiple node information. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Pod" }} + message Info.Pod { + string app_name = 1; + string name = 2; + string namespace = 3; + string ip = 4; + Info.CPU cpu = 5; + Info.Memory memory = 6; + Info.Node node = 7; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Pod" }} + - Info.Pod + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | app_name | string | | The app name of the pod on the label. | + | name | string | | The name of the pod. | + | namespace | string | | The namespace of the pod. | + | ip | string | | The IP of the pod. | + | cpu | Info.CPU | | The CPU information of the pod. | + | memory | Info.Memory | | The memory information of the pod. | + | node | Info.Node | | The node information of the pod. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Pods" }} + message Info.Pods { + repeated Info.Pod pods = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Pods" }} + - Info.Pods + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | pods | Info.Pod | repeated | The multiple pod information. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Service" }} + message Info.Service { + string name = 1; + string cluster_ip = 2; + repeated string cluster_ips = 3; + repeated Info.ServicePort ports = 4; + Info.Labels labels = 5; + Info.Annotations annotations = 6; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Service" }} + - Info.Service + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | name | string | | The name of the svc. | + | cluster_ip | string | | The cluster ip of the svc. | + | cluster_ips | string | repeated | The cluster ips of the svc. | + | ports | Info.ServicePort | repeated | The port of the svc. | + | labels | Info.Labels | | The labels of the service. | + | annotations | Info.Annotations | | The annotations of the service. | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.ServicePort" }} + message Info.ServicePort { + string name = 1; + int32 port = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.ServicePort" }} + - Info.ServicePort + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | name | string | | The name of the port. 
| + | port | int32 | | The port number | +{{- end -}} + +{{- define "_scheme:payload.v1.Info.Services" }} + message Info.Services { + repeated Info.Service services = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Info.Services" }} + - Info.Services + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | services | Info.Service | repeated | The multiple service information. | +{{- end -}} + +{{- define "_scheme:payload.v1.Insert" }} + message Insert { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Insert" }} + - Insert + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Insert.Config" }} + message Insert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Insert.Config" }} + - Insert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during insert operation. | + | filters | Filter.Config | | Filter configurations. | + | timestamp | int64 | | Insert timestamp. | +{{- end -}} + +{{- define "_scheme:payload.v1.Insert.MultiObjectRequest" }} + message Insert.MultiObjectRequest { + repeated Insert.ObjectRequest requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Insert.MultiObjectRequest" }} + - Insert.MultiObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Insert.ObjectRequest | repeated | Represent multiple insert by object content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Insert.MultiRequest" }} + message Insert.MultiRequest { + repeated Insert.Request requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Insert.MultiRequest" }} + - Insert.MultiRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Insert.Request | repeated | Represent multiple insert request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Insert.ObjectRequest" }} + message Insert.ObjectRequest { + Object.Blob object = 1; + Insert.Config config = 2; + Filter.Target vectorizer = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Insert.ObjectRequest" }} + - Insert.ObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | object | Object.Blob | | The binary object to be inserted. | + | config | Insert.Config | | The configuration of the insert request. | + | vectorizer | Filter.Target | | Filter configurations. | +{{- end -}} + +{{- define "_scheme:payload.v1.Insert.Request" }} + message Insert.Request { + Object.Vector vector = 1; + Insert.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Insert.Request" }} + - Insert.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | Object.Vector | | The vector to be inserted. | + | config | Insert.Config | | The configuration of the insert request. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Meta" }} + message Meta { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Meta" }} + - Meta + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Meta.Key" }} + message Meta.Key { + string key = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Meta.Key" }} + - Meta.Key + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Meta.KeyValue" }} + message Meta.KeyValue { + Meta.Key key = 1; + Meta.Value value = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Meta.KeyValue" }} + - Meta.KeyValue + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | key | Meta.Key | | | + | value | Meta.Value | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Meta.Value" }} + message Meta.Value { + google.protobuf.Any value = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Meta.Value" }} + - Meta.Value + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | value | google.protobuf.Any | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Mirror" }} + message Mirror { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Mirror" }} + - Mirror + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Mirror.Target" }} + message Mirror.Target { + string host = 1; + uint32 port = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Mirror.Target" }} + - Mirror.Target + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | +{{- end -}} + +{{- define "_scheme:payload.v1.Mirror.Targets" }} + message Mirror.Targets { + repeated Mirror.Target targets = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Mirror.Targets" }} + - Mirror.Targets + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | targets | Mirror.Target | repeated | The multiple target information. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object" }} + message Object { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Object" }} + - Object + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Blob" }} + message Object.Blob { + string id = 1; + bytes object = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Blob" }} + - Object.Blob + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The object ID. | + | object | bytes | | The binary object. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Distance" }} + message Object.Distance { + string id = 1; + float distance = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Distance" }} + - Object.Distance + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The vector ID. | + | distance | float | | The distance. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Object.ID" }} + message Object.ID { + string id = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.ID" }} + - Object.ID + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.IDs" }} + message Object.IDs { + repeated string ids = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.IDs" }} + - Object.IDs + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | ids | string | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.List" }} + message Object.List { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Object.List" }} + - Object.List + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Object.List.Request" }} + message Object.List.Request { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Object.List.Request" }} + - Object.List.Request + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Object.List.Response" }} + message Object.List.Response { + Object.Vector vector = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.List.Response" }} + - Object.List.Response + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | Object.Vector | | The vector | + | status | google.rpc.Status | | The RPC error status. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Location" }} + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Location" }} + - Object.Location + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Locations" }} + message Object.Locations { + repeated Object.Location locations = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Locations" }} + - Object.Locations + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | locations | Object.Location | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.ReshapeVector" }} + message Object.ReshapeVector { + bytes object = 1; + repeated int32 shape = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.ReshapeVector" }} + - Object.ReshapeVector + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | object | bytes | | The binary object. | + | shape | int32 | repeated | The new shape. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.StreamBlob" }} + message Object.StreamBlob { + Object.Blob blob = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.StreamBlob" }} + - Object.StreamBlob + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | blob | Object.Blob | | The binary object. | + | status | google.rpc.Status | | The RPC error status. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Object.StreamDistance" }} + message Object.StreamDistance { + Object.Distance distance = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.StreamDistance" }} + - Object.StreamDistance + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | distance | Object.Distance | | The distance. | + | status | google.rpc.Status | | The RPC error status. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.StreamLocation" }} + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.StreamLocation" }} + - Object.StreamLocation + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.StreamVector" }} + message Object.StreamVector { + Object.Vector vector = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.StreamVector" }} + - Object.StreamVector + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | Object.Vector | | The vector. | + | status | google.rpc.Status | | The RPC error status. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Timestamp" }} + message Object.Timestamp { + string id = 1; + int64 timestamp = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Timestamp" }} + - Object.Timestamp + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The vector ID. | + | timestamp | int64 | | timestamp represents when this vector inserted. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.TimestampRequest" }} + message Object.TimestampRequest { + Object.ID id = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.TimestampRequest" }} + - Object.TimestampRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | Object.ID | | The vector ID to be fetched. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Vector" }} + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Vector" }} + - Object.Vector + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | +{{- end -}} + +{{- define "_scheme:payload.v1.Object.VectorRequest" }} + message Object.VectorRequest { + Object.ID id = 1; + Filter.Config filters = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.VectorRequest" }} + - Object.VectorRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | Object.ID | | The vector ID to be fetched. | + | filters | Filter.Config | | Filter configurations. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Object.Vectors" }} + message Object.Vectors { + repeated Object.Vector vectors = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Object.Vectors" }} + - Object.Vectors + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vectors | Object.Vector | repeated | | +{{- end -}} + +{{- define "_scheme:payload.v1.Remove" }} + message Remove { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Remove" }} + - Remove + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Remove.Config" }} + message Remove.Config { + bool skip_strict_exist_check = 1; + int64 timestamp = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.Config" }} + - Remove.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | timestamp | int64 | | Remove timestamp. | +{{- end -}} + +{{- define "_scheme:payload.v1.Remove.MultiRequest" }} + message Remove.MultiRequest { + repeated Remove.Request requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.MultiRequest" }} + - Remove.MultiRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Remove.Request | repeated | Represent the multiple remove request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Remove.Request" }} + message Remove.Request { + Object.ID id = 1; + Remove.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.Request" }} + - Remove.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | Object.ID | | The object ID to be removed. | + | config | Remove.Config | | The configuration of the remove request. | +{{- end -}} + +{{- define "_scheme:payload.v1.Remove.Timestamp" }} + message Remove.Timestamp { + int64 timestamp = 1; + Remove.Timestamp.Operator operator = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.Timestamp" }} + - Remove.Timestamp + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | timestamp | int64 | | The timestamp. | + | operator | Remove.Timestamp.Operator | | The conditional operator. | +{{- end -}} + +{{- define "_scheme:payload.v1.Remove.TimestampRequest" }} + message Remove.TimestampRequest { + repeated Remove.Timestamp timestamps = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.TimestampRequest" }} + - Remove.TimestampRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | timestamps | Remove.Timestamp | repeated | The timestamp comparison list. If more than one is specified, the `AND` +search is applied. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Search" }} + message Search { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Search" }} + - Search + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Search.Config" }} + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.Config" }} + - Search.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.IDRequest" }} + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.IDRequest" }} + - Search.IDRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.MultiIDRequest" }} + message Search.MultiIDRequest { + repeated Search.IDRequest requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.MultiIDRequest" }} + - Search.MultiIDRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Search.IDRequest | repeated | Represent the multiple search by ID request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.MultiObjectRequest" }} + message Search.MultiObjectRequest { + repeated Search.ObjectRequest requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.MultiObjectRequest" }} + - Search.MultiObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Search.ObjectRequest | repeated | Represent the multiple search by binary object request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.MultiRequest" }} + message Search.MultiRequest { + repeated Search.Request requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.MultiRequest" }} + - Search.MultiRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Search.Request | repeated | Represent the multiple search request content. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Search.ObjectRequest" }} + message Search.ObjectRequest { + bytes object = 1; + Search.Config config = 2; + Filter.Target vectorizer = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.ObjectRequest" }} + - Search.ObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | object | bytes | | The binary object to be searched. | + | config | Search.Config | | The configuration of the search request. | + | vectorizer | Filter.Target | | Filter configuration. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.Request" }} + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.Request" }} + - Search.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.Response" }} + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.Response" }} + - Search.Response + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.Responses" }} + message Search.Responses { + repeated Search.Response responses = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.Responses" }} + - Search.Responses + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Search.StreamResponse" }} + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.StreamResponse" }} + - Search.StreamResponse + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | +{{- end -}} + +{{- define "_scheme:payload.v1.Update" }} + message Update { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Update" }} + - Update + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Update.Config" }} + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.Config" }} + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) +during update operation. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Update.MultiObjectRequest" }} + message Update.MultiObjectRequest { + repeated Update.ObjectRequest requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.MultiObjectRequest" }} + - Update.MultiObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Update.ObjectRequest | repeated | Represent the multiple update object request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Update.MultiRequest" }} + message Update.MultiRequest { + repeated Update.Request requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.MultiRequest" }} + - Update.MultiRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Update.Request | repeated | Represent the multiple update request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Update.ObjectRequest" }} + message Update.ObjectRequest { + Object.Blob object = 1; + Update.Config config = 2; + Filter.Target vectorizer = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.ObjectRequest" }} + - Update.ObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | object | Object.Blob | | The binary object to be updated. | + | config | Update.Config | | The configuration of the update request. | + | vectorizer | Filter.Target | | Filter target. | +{{- end -}} + +{{- define "_scheme:payload.v1.Update.Request" }} + message Update.Request { + Object.Vector vector = 1; + Update.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.Request" }} + - Update.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | Object.Vector | | The vector to be updated. | + | config | Update.Config | | The configuration of the update request. | +{{- end -}} + +{{- define "_scheme:payload.v1.Update.TimestampRequest" }} + message Update.TimestampRequest { + string id = 1; + int64 timestamp = 2; + bool force = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Update.TimestampRequest" }} + - Update.TimestampRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | id | string | | The vector ID. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + | force | bool | | force represents forcefully update the timestamp. | +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert" }} + message Upsert { + // empty + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert" }} + - Upsert + + empty +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert.Config" }} + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert.Config" }} + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) +during update operation. 
| +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert.MultiObjectRequest" }} + message Upsert.MultiObjectRequest { + repeated Upsert.ObjectRequest requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert.MultiObjectRequest" }} + - Upsert.MultiObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Upsert.ObjectRequest | repeated | Represent the multiple upsert object request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert.MultiRequest" }} + message Upsert.MultiRequest { + repeated Upsert.Request requests = 1; + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert.MultiRequest" }} + - Upsert.MultiRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | requests | Upsert.Request | repeated | Represent the multiple upsert request content. | +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert.ObjectRequest" }} + message Upsert.ObjectRequest { + Object.Blob object = 1; + Upsert.Config config = 2; + Filter.Target vectorizer = 3; + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert.ObjectRequest" }} + - Upsert.ObjectRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | object | Object.Blob | | The binary object to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + | vectorizer | Filter.Target | | Filter target. | +{{- end -}} + +{{- define "_scheme:payload.v1.Upsert.Request" }} + message Upsert.Request { + Object.Vector vector = 1; + Upsert.Config config = 2; + } +{{- end -}} + +{{- define "_field:payload.v1.Upsert.Request" }} + - Upsert.Request + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | vector | Object.Vector | | The vector to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | +{{- end -}} + +{{ define "_scheme:payload.v1.Remove.Timestamp.Operator" }} + enum Remove.Timestamp.Operator { + Eq = 0; + Ne = 1; + Ge = 2; + Gt = 3; + Le = 4; + Lt = 5; + } +{{- end -}} + +{{- define "_field:payload.v1.Remove.Timestamp.Operator" -}}{{- end -}} +{{ define "_scheme:payload.v1.Search.AggregationAlgorithm" }} + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } +{{- end -}} + +{{- define "_field:payload.v1.Search.AggregationAlgorithm" -}}{{- end -}} diff --git a/apis/docs/v1/payload.tmpl b/apis/docs/v1/payload.tmpl new file mode 100644 index 0000000000..1735a7cc69 --- /dev/null +++ b/apis/docs/v1/payload.tmpl @@ -0,0 +1,100 @@ +{{- $skip_types := list -}} +{{- range .Scalars -}} + {{- $skip_types = append $skip_types .ProtoType -}} +{{- end -}} + +{{- define "_bfs" -}} + {{- $g := .args.g -}} + {{- $q := .args.q -}} + {{- $v := .args.v -}} + {{- if $q -}} + {{- $head := first $q -}} + {{- if not (has $head $v) -}} + {{- $q = rest $q -}} + {{- range $e := (pluck $head .args.g | first) -}} + {{- $q = append $q $e -}} + {{- end -}} + {{- $v = append $v $head -}} + {{- $_ := set .args "q" $q -}} + {{- $_ := set .args "v" $v -}} + {{- if $q -}} + {{- template "_bfs" . -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{- define "bfs" -}} + {{- $_ := set .args "q" (list .args.start) -}} + {{- $_ := set .args "v" (list) -}} + {{- template "_bfs" . -}} + {{- $visited := list -}} + {{- range .args.v -}} + {{- $visited = append $visited . -}} + {{- end -}} + {{- $_ := set . 
"visited" $visited -}} +{{- end -}} + +{{- range .Files -}} + {{- $deps := dict -}} + {{- range .Messages -}} + {{- $fields := list -}} + {{- range .Fields -}} + {{- if not (or (has .LongType $skip_types) (hasPrefix "google" .FullType)) -}} + {{- $fields = append $fields .FullType -}} + {{- end -}} + {{- end -}} + {{- $_ := set $deps .FullName (uniq $fields) -}} + {{- end -}} + + {{- range .Messages -}} + {{- with $d := dict "args" (dict "g" $deps "start" .FullName) -}} + {{- template "bfs" $d -}} + {{- printf "{{- define \"scheme:%s\" -}}" $d.args.start -}} + {{- range $d.visited -}} + {{- printf "\n{{ template \"_scheme:%s\" }}" . -}} + {{- end -}} + {{- println "\n{{- end -}}" -}} + {{- printf "{{- define \"field:%s\" -}}" $d.args.start -}} + {{- range $d.visited -}} + {{- printf "\n{{ template \"_field:%s\" }}" . -}} + {{- end -}} + {{- println "\n{{- end -}}" -}} + {{- end -}} + {{- end -}} + + {{- range .Messages -}} + {{ printf "{{- define \"_scheme:%s\" }}" .FullName }} + message {{ .LongName }} { + {{- range $i, $ := .Fields }} + {{ if $.Label }}{{ $.Label }} {{end}}{{ $.LongType }} {{ $.Name }} = {{ add1 $i }}; + {{- else }} + // empty + {{- end }} + } + {{- println "\n{{- end -}}\n" -}} + + {{- printf "{{- define \"_field:%s\" }}\n" .FullName -}} + {{- printf " - %s\n" .LongName -}} + {{- if .HasFields }} + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + {{- range .Fields }} + | {{ .Name }} | {{ .LongType }} | {{.Label }} | {{ .Description }} | + {{- end }} + {{- else }} + empty + {{- end -}} + {{- println "\n{{- end -}}\n" -}} + {{- end -}} + {{- range .Enums -}} + {{- printf "{{ define \"_scheme:%s\" }}" .FullName }} + enum {{ .LongName }} { + {{- range .Values }} + {{ .Name }} = {{ .Number }}; + {{- end }} + } + {{- println "\n{{- end -}}\n" -}} + {{- printf "{{- define \"_field:%s\" -}}{{- end -}}\n" .FullName -}} + {{- end -}} +{{- end -}} diff --git a/apis/docs/v1/remove.md b/apis/docs/v1/remove.md new file mode 100644 index 0000000000..d2bd05b071 --- /dev/null +++ b/apis/docs/v1/remove.md @@ -0,0 +1,446 @@ +# Vald Remove APIs + +## Overview + +Remove Service is responsible for removing vectors indexed in the `vald-agent`. + +```rpc +service Remove { + + rpc Remove(payload.v1.Remove.Request) returns (payload.v1.Object.Location) {} + rpc RemoveByTimestamp(payload.v1.Remove.TimestampRequest) returns (payload.v1.Object.Locations) {} + rpc StreamRemove(payload.v1.Remove.Request) returns (payload.v1.Object.StreamLocation) {} + rpc MultiRemove(payload.v1.Remove.MultiRequest) returns (payload.v1.Object.Locations) {} + +} +``` + +## Remove RPC + +Remove RPC is the method to remove a single vector. + +### Input + +- the scheme of `payload.v1.Remove.Request` + + ```rpc + message Remove.Request { + Object.ID id = 1; + Remove.Config config = 2; + } + + message Object.ID { + string id = 1; + } + + message Remove.Config { + bool skip_strict_exist_check = 1; + int64 timestamp = 2; + } + + ``` + + - Remove.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | Object.ID | | The object ID to be removed. | + | config | Remove.Config | | The configuration of the remove request. 
| + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + + - Remove.Config + + | field | type | label | description | + | :---------------------: | :---- | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | timestamp | int64 | | Remove timestamp. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## RemoveByTimestamp RPC + +RemoveByTimestamp RPC is the method to remove vectors based on timestamp. + +
+In the TimestampRequest message, the 'timestamps' field is repeated, allowing the inclusion of multiple Timestamps.
+When multiple Timestamps are provided, they are combined as an `AND` condition, enabling deletions over a specified range.
+This design allows for versatile deletion operations, facilitating tasks such as removing data within a specific time range. +
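+
+As a concrete illustration of the `AND` semantics described above, here is a minimal client-side sketch that removes every vector whose timestamp falls in a half-open range. It is illustrative only and not part of this patch: the helper name `removeRange`, the Go packages `github.com/vdaas/vald-client-go/v1/payload` and `github.com/vdaas/vald-client-go/v1/vald`, and the nanosecond timestamp unit are assumptions to verify against your client library and cluster configuration.
+
+```go
+package example
+
+import (
+	"context"
+	"time"
+
+	"github.com/vdaas/vald-client-go/v1/payload"
+	"github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// removeRange deletes every vector whose timestamp t satisfies
+// begin <= t < end; the two Timestamp entries are combined with AND,
+// as described in the notice above.
+func removeRange(ctx context.Context, c vald.RemoveClient, begin, end time.Time) error {
+	req := &payload.Remove_TimestampRequest{
+		Timestamps: []*payload.Remove_Timestamp{
+			{Timestamp: begin.UnixNano(), Operator: payload.Remove_Timestamp_Ge}, // t >= begin
+			{Timestamp: end.UnixNano(), Operator: payload.Remove_Timestamp_Lt},   // t < end
+		},
+	}
+	_, err := c.RemoveByTimestamp(ctx, req)
+	return err
+}
+```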
+ +### Input + +- the scheme of `payload.v1.Remove.TimestampRequest` + + ```rpc + message Remove.TimestampRequest { + repeated Remove.Timestamp timestamps = 1; + } + + message Remove.Timestamp { + int64 timestamp = 1; + Remove.Timestamp.Operator operator = 2; + } + + enum Remove.Timestamp.Operator { + Eq = 0; + Ne = 1; + Ge = 2; + Gt = 3; + Le = 4; + Lt = 5; + } + + ``` + + - Remove.TimestampRequest + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | timestamps | Remove.Timestamp | repeated | The timestamp comparison list. If more than one is specified, the `AND` + + search is applied. | + + - Remove.Timestamp + + | field | type | label | description | + | :-------: | :------------------------ | :---- | :------------------------ | + | timestamp | int64 | | The timestamp. | + | operator | Remove.Timestamp.Operator | | The conditional operator. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | No vectors in the system match the specified timestamp conditions. | Check whether vectors matching the specified timestamp conditions exist in the system, and fix conditions if needed. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamRemove RPC + +A method to remove multiple indexed vectors by bidirectional streaming. + +StreamRemove RPC is the method to remove multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the remove request can be communicated in any order between client and server. +Each Remove request and response are independent. +It's the recommended method to remove a large number of vectors. + +### Input + +- the scheme of `payload.v1.Remove.Request` + + ```rpc + message Remove.Request { + Object.ID id = 1; + Remove.Config config = 2; + } + + message Object.ID { + string id = 1; + } + + message Remove.Config { + bool skip_strict_exist_check = 1; + int64 timestamp = 2; + } + + ``` + + - Remove.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | Object.ID | | The object ID to be removed. | + | config | Remove.Config | | The configuration of the remove request. | + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + + - Remove.Config + + | field | type | label | description | + | :---------------------: | :---- | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | timestamp | int64 | | Remove timestamp. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. 
| + +## MultiRemove RPC + +MultiRemove is the method to remove multiple vectors in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
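+
+One way to stay under that limit is to split a large ID list into fixed-size batches and issue one MultiRemove call per batch. The sketch below is illustrative only and not part of this patch; the helper name `removeInBatches` and the fallback batch size are arbitrary, and it assumes the same Go packages as the RemoveByTimestamp sketch above.
+
+```go
+// removeInBatches splits ids into fixed-size batches and issues one
+// MultiRemove call per batch so that no single request outgrows the
+// gRPC message size limit. Imports are the same as in the
+// RemoveByTimestamp sketch above.
+func removeInBatches(ctx context.Context, c vald.RemoveClient, ids []string, batch int) error {
+	if batch <= 0 {
+		batch = 1000 // fallback batch size (arbitrary)
+	}
+	for start := 0; start < len(ids); start += batch {
+		end := start + batch
+		if end > len(ids) {
+			end = len(ids)
+		}
+		reqs := make([]*payload.Remove_Request, 0, end-start)
+		for _, id := range ids[start:end] {
+			reqs = append(reqs, &payload.Remove_Request{
+				Id:     &payload.Object_ID{Id: id},
+				Config: &payload.Remove_Config{SkipStrictExistCheck: true}, // illustrative; adjust as needed
+			})
+		}
+		if _, err := c.MultiRemove(ctx, &payload.Remove_MultiRequest{Requests: reqs}); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+```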
+ +### Input + +- the scheme of `payload.v1.Remove.MultiRequest` + + ```rpc + message Remove.MultiRequest { + repeated Remove.Request requests = 1; + } + + message Remove.Request { + Object.ID id = 1; + Remove.Config config = 2; + } + + message Object.ID { + string id = 1; + } + + message Remove.Config { + bool skip_strict_exist_check = 1; + int64 timestamp = 2; + } + + ``` + + - Remove.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :--------------------------------------------- | + | requests | Remove.Request | repeated | Represent the multiple remove request content. | + + - Remove.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | Object.ID | | The object ID to be removed. | + | config | Remove.Config | | The configuration of the remove request. | + + - Object.ID + + | field | type | label | description | + | :---: | :----- | :---- | :---------- | + | id | string | | | + + - Remove.Config + + | field | type | label | description | + | :---------------------: | :---- | :---- | :-------------------------------------------------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | timestamp | int64 | | Remove timestamp. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. 
| diff --git a/apis/docs/v1/search.md b/apis/docs/v1/search.md new file mode 100644 index 0000000000..58d3a267f0 --- /dev/null +++ b/apis/docs/v1/search.md @@ -0,0 +1,1888 @@ +# Vald Search APIs + +## Overview + +Search Service is responsible for searching vectors similar to the user request vector from `vald-agent`. + +```rpc +service Search { + + rpc Search(payload.v1.Search.Request) returns (payload.v1.Search.Response) {} + rpc SearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) {} + rpc StreamSearch(payload.v1.Search.Request) returns (payload.v1.Search.StreamResponse) {} + rpc StreamSearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.StreamResponse) {} + rpc MultiSearch(payload.v1.Search.MultiRequest) returns (payload.v1.Search.Responses) {} + rpc MultiSearchByID(payload.v1.Search.MultiIDRequest) returns (payload.v1.Search.Responses) {} + rpc LinearSearch(payload.v1.Search.Request) returns (payload.v1.Search.Response) {} + rpc LinearSearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) {} + rpc StreamLinearSearch(payload.v1.Search.Request) returns (payload.v1.Search.StreamResponse) {} + rpc StreamLinearSearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.StreamResponse) {} + rpc MultiLinearSearch(payload.v1.Search.MultiRequest) returns (payload.v1.Search.Responses) {} + rpc MultiLinearSearchByID(payload.v1.Search.MultiIDRequest) returns (payload.v1.Search.Responses) {} + +} +``` + +## Search RPC + +Search RPC is the method to search vector(s) similar to the request vector. + +### Input + +- the scheme of `payload.v1.Search.Request` + + ```rpc + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. 
| + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Response` + + ```rpc + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## SearchByID RPC + +SearchByID RPC is the method to search similar vectors using a user-defined vector ID.
+The vector with the same requested ID should be indexed into the `vald-agent` before searching. + +### Input + +- the scheme of `payload.v1.Search.IDRequest` + + ```rpc + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Response` + + ```rpc + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. 
+ +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamSearch RPC + +StreamSearch RPC is the method to search vectors with multi queries(vectors) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the search request can be communicated in any order between the client and server. +Each Search request and response are independent. + +### Input + +- the scheme of `payload.v1.Search.Request` + + ```rpc + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.StreamResponse` + + ```rpc + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.StreamResponse + + | field | type | label | description | + | :------: | :---------------- | :---- | :----------------------------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamSearchByID RPC + +StreamSearchByID RPC is the method to search vectors with multi queries(IDs) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the search request can be communicated in any order between the client and server. +Each SearchByID request and response are independent. + +### Input + +- the scheme of `payload.v1.Search.IDRequest` + + ```rpc + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.StreamResponse` + + ```rpc + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.StreamResponse + + | field | type | label | description | + | :------: | :---------------- | :---- | :----------------------------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiSearch RPC + +MultiSearch RPC is the method to search vectors with multiple vectors in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
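+The sketch below shows one hedged way to issue a MultiSearch call from Go. It assumes a client generated from the protos above (for example the `vald-client-go` packages); the import paths, the gateway address, and identifiers such as `vald.NewSearchClient` and `payload.Search_MultiRequest` follow standard protoc-gen-go naming and should be verified against your own environment.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+	"log"
+
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+
+	"github.com/vdaas/vald-client-go/v1/payload" // assumed package path
+	"github.com/vdaas/vald-client-go/v1/vald"    // assumed package path
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Example gateway address; replace with your own endpoint.
+	conn, err := grpc.Dial("vald-lb-gateway.default.svc.cluster.local:8081",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatalf("failed to connect: %v", err)
+	}
+	defer conn.Close()
+	client := vald.NewSearchClient(conn)
+
+	// Pack several Search.Request entries into a single Search.MultiRequest.
+	// The vector dimension must match the Vald Agent configuration.
+	queries := [][]float32{{0.1, 0.2, 0.3}, {0.4, 0.5, 0.6}}
+	reqs := make([]*payload.Search_Request, 0, len(queries))
+	for i, v := range queries {
+		reqs = append(reqs, &payload.Search_Request{
+			Vector: v,
+			Config: &payload.Search_Config{
+				RequestId: fmt.Sprintf("multi-search-%d", i),
+				Num:       10,            // maximum number of results per query
+				Radius:    -1,            // commonly -1 to leave the radius to the Agent
+				Epsilon:   0.01,
+				Timeout:   3_000_000_000, // 3 seconds in nanoseconds
+				MinNum:    5,
+			},
+		})
+	}
+
+	res, err := client.MultiSearch(ctx, &payload.Search_MultiRequest{Requests: reqs})
+	if err != nil {
+		log.Fatalf("MultiSearch failed: %v", err)
+	}
+	for _, r := range res.GetResponses() {
+		for _, d := range r.GetResults() {
+			log.Printf("request_id=%s id=%s distance=%f",
+				r.GetRequestId(), d.GetId(), d.GetDistance())
+		}
+	}
+}
+```
+
+Keeping the number of requests per call modest also helps stay under the gRPC message size limit mentioned above.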
+ +### Input + +- the scheme of `payload.v1.Search.MultiRequest` + + ```rpc + message Search.MultiRequest { + repeated Search.Request requests = 1; + } + + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :--------------------------------------------- | + | requests | Search.Request | repeated | Represent the multiple search request content. | + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Responses` + + ```rpc + message Search.Responses { + repeated Search.Response responses = 1; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Responses + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------------------------------------------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiSearchByID RPC + +MultiSearchByID RPC is the method to search vectors with multiple IDs in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
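+As a hedged sketch, a MultiSearchByID call can be issued as shown below. It reuses the connection setup from the MultiSearch sketch above and the same assumptions about the generated Go client (`vald.SearchClient`, `payload.Search_MultiIDRequest` are assumed identifiers).
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/vdaas/vald-client-go/v1/payload" // assumed package path
+	"github.com/vdaas/vald-client-go/v1/vald"    // assumed package path
+)
+
+// multiSearchByID looks up neighbors for several already-indexed vector IDs
+// in a single request. Connection setup is the same as in the MultiSearch
+// sketch above.
+func multiSearchByID(ctx context.Context, client vald.SearchClient, ids []string) error {
+	reqs := make([]*payload.Search_IDRequest, 0, len(ids))
+	for _, id := range ids {
+		reqs = append(reqs, &payload.Search_IDRequest{
+			Id: id, // must already be indexed, otherwise NOT_FOUND is returned
+			Config: &payload.Search_Config{
+				RequestId: "by-id-" + id,
+				Num:       10,
+				Radius:    -1,
+				Epsilon:   0.01,
+				Timeout:   3_000_000_000, // 3 seconds in nanoseconds
+			},
+		})
+	}
+	res, err := client.MultiSearchByID(ctx, &payload.Search_MultiIDRequest{Requests: reqs})
+	if err != nil {
+		return err
+	}
+	for _, r := range res.GetResponses() {
+		log.Printf("request_id=%s hits=%d", r.GetRequestId(), len(r.GetResults()))
+	}
+	return nil
+}
+```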
+ +### Input + +- the scheme of `payload.v1.Search.MultiIDRequest` + + ```rpc + message Search.MultiIDRequest { + repeated Search.IDRequest requests = 1; + } + + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.MultiIDRequest + + | field | type | label | description | + | :------: | :--------------- | :------- | :--------------------------------------------------- | + | requests | Search.IDRequest | repeated | Represent the multiple search by ID request content. | + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Responses` + + ```rpc + message Search.Responses { + repeated Search.Response responses = 1; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Responses + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------------------------------------------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## LinearSearch RPC + +LinearSearch RPC is the method to linear search vector(s) similar to the request vector. + +### Input + +- the scheme of `payload.v1.Search.Request` + + ```rpc + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. 
| + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Response` + + ```rpc + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## LinearSearchByID RPC + +LinearSearchByID RPC is the method to linear search similar vectors using a user-defined vector ID.
+The vector with the same requested ID should be indexed into the `vald-agent` before searching. +You will get a `NOT_FOUND` error if the vector isn't stored. + +### Input + +- the scheme of `payload.v1.Search.IDRequest` + + ```rpc + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Response` + + ```rpc + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. | + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. 
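+For reference before the troubleshooting notes below, here is a minimal, hedged sketch of a LinearSearchByID call. It reuses the connection setup from the earlier search examples and the same assumptions about the generated Go client (`vald.SearchClient` and `payload.Search_IDRequest` are assumed identifiers).
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/vdaas/vald-client-go/v1/payload" // assumed package path
+	"github.com/vdaas/vald-client-go/v1/vald"    // assumed package path
+)
+
+// linearSearchByID runs an exact (linear) search using a vector that is
+// already indexed under the given ID.
+func linearSearchByID(ctx context.Context, client vald.SearchClient, id string) {
+	res, err := client.LinearSearchByID(ctx, &payload.Search_IDRequest{
+		Id: id,
+		Config: &payload.Search_Config{
+			RequestId: "linear-by-id-" + id,
+			Num:       10,
+			Timeout:   3_000_000_000, // 3 seconds in nanoseconds
+		},
+	})
+	if err != nil {
+		// e.g. NOT_FOUND when the requested ID is not indexed yet.
+		log.Printf("LinearSearchByID failed: %v", err)
+		return
+	}
+	for _, d := range res.GetResults() {
+		log.Printf("id=%s distance=%f", d.GetId(), d.GetDistance())
+	}
+}
+```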
+ +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamLinearSearch RPC + +StreamLinearSearch RPC is the method to linear search vectors with multi queries(vectors) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the linear search request can be communicated in any order between the client and server. +Each LinearSearch request and response are independent. + +### Input + +- the scheme of `payload.v1.Search.Request` + + ```rpc + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.StreamResponse` + + ```rpc + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.StreamResponse + + | field | type | label | description | + | :------: | :---------------- | :---- | :----------------------------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamLinearSearchByID RPC + +StreamLinearSearchByID RPC is the method to linear search vectors with multi queries(IDs) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the linear search request can be communicated in any order between the client and server. +Each LinearSearchByID request and response are independent. + +### Input + +- the scheme of `payload.v1.Search.IDRequest` + + ```rpc + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.StreamResponse` + + ```rpc + message Search.StreamResponse { + Search.Response response = 1; + google.rpc.Status status = 2; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.StreamResponse + + | field | type | label | description | + | :------: | :---------------- | :---- | :----------------------------- | + | response | Search.Response | | Represent the search response. | + | status | google.rpc.Status | | The RPC error status. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiLinearSearch RPC + +MultiLinearSearch RPC is the method to linear search vectors with multiple vectors in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
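+Because of the message size limitation mentioned above, large query sets are usually split into several MultiLinearSearch calls. The sketch below shows one hedged way to do that chunking, under the same assumptions about the generated Go client as in the earlier examples; the batch size of 100 is an arbitrary example value.
+
+```go
+package main
+
+import (
+	"context"
+	"fmt"
+
+	"github.com/vdaas/vald-client-go/v1/payload" // assumed package path
+	"github.com/vdaas/vald-client-go/v1/vald"    // assumed package path
+)
+
+// multiLinearSearchChunked splits the queries into fixed-size batches so that
+// each MultiLinearSearch request stays well below the gRPC message size limit.
+func multiLinearSearchChunked(ctx context.Context, client vald.SearchClient, vectors [][]float32) ([]*payload.Search_Response, error) {
+	const batchSize = 100 // example value; tune for your vector dimension and limit
+	var all []*payload.Search_Response
+	for start := 0; start < len(vectors); start += batchSize {
+		end := start + batchSize
+		if end > len(vectors) {
+			end = len(vectors)
+		}
+		reqs := make([]*payload.Search_Request, 0, end-start)
+		for i, v := range vectors[start:end] {
+			reqs = append(reqs, &payload.Search_Request{
+				Vector: v,
+				Config: &payload.Search_Config{
+					RequestId: fmt.Sprintf("linear-%d", start+i),
+					Num:       10,
+					Timeout:   3_000_000_000, // 3 seconds in nanoseconds
+				},
+			})
+		}
+		res, err := client.MultiLinearSearch(ctx, &payload.Search_MultiRequest{Requests: reqs})
+		if err != nil {
+			return nil, err
+		}
+		all = append(all, res.GetResponses()...)
+	}
+	return all, nil
+}
+```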
+ +### Input + +- the scheme of `payload.v1.Search.MultiRequest` + + ```rpc + message Search.MultiRequest { + repeated Search.Request requests = 1; + } + + message Search.Request { + repeated float vector = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :--------------------------------------------- | + | requests | Search.Request | repeated | Represent the multiple search request content. | + + - Search.Request + + | field | type | label | description | + | :----: | :------------ | :------- | :--------------------------------------- | + | vector | float | repeated | The vector to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Responses` + + ```rpc + message Search.Responses { + repeated Search.Response responses = 1; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Responses + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------------------------------------------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiLinearSearchByID RPC + +MultiLinearSearchByID RPC is the method to linear search vectors with multiple IDs in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit. +
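+A hedged sketch of a MultiLinearSearchByID call is shown below, under the same assumptions about the generated Go client as in the earlier examples (`vald.SearchClient` and `payload.Search_MultiIDRequest` are assumed identifiers).
+
+```go
+package main
+
+import (
+	"context"
+
+	"github.com/vdaas/vald-client-go/v1/payload" // assumed package path
+	"github.com/vdaas/vald-client-go/v1/vald"    // assumed package path
+)
+
+// multiLinearSearchByID performs exact searches for several indexed IDs in
+// one request. Connection setup is the same as in the earlier sketches.
+func multiLinearSearchByID(ctx context.Context, client vald.SearchClient, ids []string) (*payload.Search_Responses, error) {
+	reqs := make([]*payload.Search_IDRequest, 0, len(ids))
+	for _, id := range ids {
+		reqs = append(reqs, &payload.Search_IDRequest{
+			Id: id, // must already be indexed, otherwise NOT_FOUND is returned
+			Config: &payload.Search_Config{
+				RequestId: "linear-by-id-" + id,
+				Num:       10,
+				Timeout:   3_000_000_000, // 3 seconds in nanoseconds
+			},
+		})
+	}
+	return client.MultiLinearSearchByID(ctx, &payload.Search_MultiIDRequest{Requests: reqs})
+}
+```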
+// + +### Input + +- the scheme of `payload.v1.Search.MultiIDRequest` + + ```rpc + message Search.MultiIDRequest { + repeated Search.IDRequest requests = 1; + } + + message Search.IDRequest { + string id = 1; + Search.Config config = 2; + } + + message Search.Config { + string request_id = 1; + uint32 num = 2; + float radius = 3; + float epsilon = 4; + int64 timeout = 5; + Filter.Config ingress_filters = 6; + Filter.Config egress_filters = 7; + uint32 min_num = 8; + Search.AggregationAlgorithm aggregation_algorithm = 9; + google.protobuf.FloatValue ratio = 10; + uint32 nprobe = 11; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + enum Search.AggregationAlgorithm { + Unknown = 0; + ConcurrentQueue = 1; + SortSlice = 2; + SortPoolSlice = 3; + PairingHeap = 4; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Search.MultiIDRequest + + | field | type | label | description | + | :------: | :--------------- | :------- | :--------------------------------------------------- | + | requests | Search.IDRequest | repeated | Represent the multiple search by ID request content. | + + - Search.IDRequest + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | id | string | | The vector ID to be searched. | + | config | Search.Config | | The configuration of the search request. | + + - Search.Config + + | field | type | label | description | + | :-------------------: | :-------------------------- | :---- | :------------------------------------------- | + | request_id | string | | Unique request ID. | + | num | uint32 | | Maximum number of result to be returned. | + | radius | float | | Search radius. | + | epsilon | float | | Search coefficient. | + | timeout | int64 | | Search timeout in nanoseconds. | + | ingress_filters | Filter.Config | | Ingress filter configurations. | + | egress_filters | Filter.Config | | Egress filter configurations. | + | min_num | uint32 | | Minimum number of result to be returned. | + | aggregation_algorithm | Search.AggregationAlgorithm | | Aggregation Algorithm | + | ratio | google.protobuf.FloatValue | | Search ratio for agent return result number. | + | nprobe | uint32 | | Search nprobe. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Search.Responses` + + ```rpc + message Search.Responses { + repeated Search.Response responses = 1; + } + + message Search.Response { + string request_id = 1; + repeated Object.Distance results = 2; + } + + message Object.Distance { + string id = 1; + float distance = 2; + } + + ``` + + - Search.Responses + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------------------------------------------- | + | responses | Search.Response | repeated | Represent the multiple search response content. | + + - Search.Response + + | field | type | label | description | + | :--------: | :-------------- | :------- | :--------------------- | + | request_id | string | | The unique request ID. | + | results | Object.Distance | repeated | Search results. 
| + + - Object.Distance + + | field | type | label | description | + | :------: | :----- | :---- | :------------- | + | id | string | | The vector ID. | + | distance | float | | The distance. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | diff --git a/apis/docs/v1/update.md b/apis/docs/v1/update.md new file mode 100644 index 0000000000..4ecee8eb3d --- /dev/null +++ b/apis/docs/v1/update.md @@ -0,0 +1,499 @@ +# Vald Update APIs + +## Overview + +Update Service updates to new vector from inserted vector in the `vald-agent` components. + +```rpc +service Update { + + rpc Update(payload.v1.Update.Request) returns (payload.v1.Object.Location) {} + rpc StreamUpdate(payload.v1.Update.Request) returns (payload.v1.Object.StreamLocation) {} + rpc MultiUpdate(payload.v1.Update.MultiRequest) returns (payload.v1.Object.Locations) {} + rpc UpdateTimestamp(payload.v1.Update.TimestampRequest) returns (payload.v1.Object.Location) {} + +} +``` + +## Update RPC + +Update RPC is the method to update a single vector. + +### Input + +- the scheme of `payload.v1.Update.Request` + + ```rpc + message Update.Request { + Object.Vector vector = 1; + Update.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Update.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be updated. | + | config | Update.Config | | The configuration of the update request. 
| + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. 
| + +## StreamUpdate RPC + +StreamUpdate RPC is the method to update multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the update request can be communicated in any order between client and server. +Each Update request and response are independent. +It's the recommended method to update the large amount of vectors. + +### Input + +- the scheme of `payload.v1.Update.Request` + + ```rpc + message Update.Request { + Object.Vector vector = 1; + Update.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Update.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be updated. | + | config | Update.Config | | The configuration of the update request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. 
+ +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiUpdate RPC + +MultiUpdate is the method to update multiple vectors in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit.
+
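+
+As a reference, the following is a minimal Go sketch of a MultiUpdate call built from the request fields described under Input below. The `github.com/vdaas/vald-client-go/v1/vald` and `.../v1/payload` import paths, the `localhost:8081` target, and the 784-dimensional placeholder vectors are assumptions for illustration; adjust them to your cluster.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/vdaas/vald-client-go/v1/payload"
+	"github.com/vdaas/vald-client-go/v1/vald"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Connect to the Vald LB gateway; the address is an example value.
+	conn, err := grpc.DialContext(ctx, "localhost:8081",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+	client := vald.NewUpdateClient(conn)
+
+	// Build a small batch of Update.Request entries with placeholder vectors.
+	// Keep the total request size under the gRPC message size limit.
+	ids := []string{"vec-1", "vec-2"}
+	reqs := make([]*payload.Update_Request, 0, len(ids))
+	for _, id := range ids {
+		reqs = append(reqs, &payload.Update_Request{
+			Vector: &payload.Object_Vector{
+				Id:     id,
+				Vector: make([]float32, 784), // dimension must match the Vald Agent config
+			},
+			Config: &payload.Update_Config{
+				SkipStrictExistCheck: false,
+			},
+		})
+	}
+
+	locs, err := client.MultiUpdate(ctx, &payload.Update_MultiRequest{Requests: reqs})
+	if err != nil {
+		log.Fatal(err)
+	}
+	for _, loc := range locs.GetLocations() {
+		log.Printf("updated: name=%s uuid=%s ips=%v", loc.GetName(), loc.GetUuid(), loc.GetIps())
+	}
+}
+```
+
+The response is an `Object.Locations` message; its fields are described under Output below.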
+ +### Input + +- the scheme of `payload.v1.Update.MultiRequest` + + ```rpc + message Update.MultiRequest { + repeated Update.Request requests = 1; + } + + message Update.Request { + Object.Vector vector = 1; + Update.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Update.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Update.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :--------------------------------------------- | + | requests | Update.Request | repeated | Represent the multiple update request content. | + + - Update.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be updated. | + | config | Update.Config | | The configuration of the update request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Update.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during update operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Update timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. 
+ +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | +| ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## UpdateTimestamp RPC + +A method to update timestamp an indexed vector. + +### Input + +- the scheme of `payload.v1.Update.TimestampRequest` + + ```rpc + message Update.TimestampRequest { + string id = 1; + int64 timestamp = 2; + bool force = 3; + } + + ``` + + - Update.TimestampRequest + + | field | type | label | description | + | :-------: | :----- | :---- | :------------------------------------------------ | + | id | string | | The vector ID. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + | force | bool | | force represents forcefully update the timestamp. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------- | + +TODO + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +TODO diff --git a/apis/docs/v1/upsert.md b/apis/docs/v1/upsert.md new file mode 100644 index 0000000000..13de9925b2 --- /dev/null +++ b/apis/docs/v1/upsert.md @@ -0,0 +1,436 @@ +# Vald Upsert APIs + +## Overview + +Upsert Service is responsible for updating existing vectors in the `vald-agent` or inserting new vectors into the `vald-agent` if the vector does not exist. + +```rpc +service Upsert { + + rpc Upsert(payload.v1.Upsert.Request) returns (payload.v1.Object.Location) {} + rpc StreamUpsert(payload.v1.Upsert.Request) returns (payload.v1.Object.StreamLocation) {} + rpc MultiUpsert(payload.v1.Upsert.MultiRequest) returns (payload.v1.Object.Locations) {} + +} +``` + +## Upsert RPC + +Upsert RPC is the method to update the inserted vector to a new single vector or add a new single vector if not inserted before. 
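+
+Before the request and response details, here is a minimal Go sketch of a single Upsert call; the request fields are described under Input below. The `vald-client-go` import paths, the `localhost:8081` target, and the 784-dimensional placeholder vector are assumptions for illustration.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"github.com/vdaas/vald-client-go/v1/payload"
+	"github.com/vdaas/vald-client-go/v1/vald"
+	"google.golang.org/grpc"
+	"google.golang.org/grpc/credentials/insecure"
+)
+
+func main() {
+	ctx := context.Background()
+
+	// Connect to the Vald LB gateway; the address is an example value.
+	conn, err := grpc.DialContext(ctx, "localhost:8081",
+		grpc.WithTransportCredentials(insecure.NewCredentials()))
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer conn.Close()
+	client := vald.NewUpsertClient(conn)
+
+	// Upsert inserts the vector when the ID does not exist yet and updates it otherwise.
+	loc, err := client.Upsert(ctx, &payload.Upsert_Request{
+		Vector: &payload.Object_Vector{
+			Id:     "vec-1",
+			Vector: make([]float32, 784), // dimension must match the Vald Agent config
+		},
+		Config: &payload.Upsert_Config{
+			SkipStrictExistCheck: false,
+		},
+	})
+	if err != nil {
+		log.Fatal(err)
+	}
+	log.Printf("upserted: name=%s uuid=%s ips=%v", loc.GetName(), loc.GetUuid(), loc.GetIps())
+}
+```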
+ +### Input + +- the scheme of `payload.v1.Upsert.Request` + + ```rpc + message Upsert.Request { + Object.Vector vector = 1; + Upsert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Upsert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Location` + + ```rpc + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. 
| +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## StreamUpsert RPC + +StreamUpsert RPC is the method to update multiple existing vectors or add new multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+Using the bidirectional streaming RPC, the upsert request can be communicated in any order between the client and server. +Each Upsert request and response are independent. +It’s the recommended method to upsert a large number of vectors. + +### Input + +- the scheme of `payload.v1.Upsert.Request` + + ```rpc + message Upsert.Request { + Object.Vector vector = 1; + Upsert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Upsert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.StreamLocation` + + ```rpc + message Object.StreamLocation { + Object.Location location = 1; + google.rpc.Status status = 2; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.StreamLocation + + | field | type | label | description | + | :------: | :---------------- | :---- | :-------------------- | + | location | Object.Location | | The vector location. | + | status | google.rpc.Status | | The RPC error status. | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. 
+ +Here are some common reasons and how to resolve each error. + +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | + +## MultiUpsert RPC + +MultiUpsert is the method to update existing multiple vectors and add new multiple vectors in **1** request. + +
+gRPC has a message size limitation.
+Please be careful that the size of the request does not exceed the limit.
+
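+
+One way to stay within the message size limit is to split a large set of requests into fixed-size batches, as in the minimal Go sketch below. The `vald-client-go` import paths, the `multiUpsertInBatches` helper name, and the fallback batch size are assumptions for illustration; the client is created as in the Upsert RPC example above.
+
+```go
+package example
+
+import (
+	"context"
+	"log"
+
+	"github.com/vdaas/vald-client-go/v1/payload"
+	"github.com/vdaas/vald-client-go/v1/vald"
+)
+
+// multiUpsertInBatches splits the requests into fixed-size batches so that each
+// MultiUpsert call stays well below the gRPC message size limit.
+func multiUpsertInBatches(
+	ctx context.Context, client vald.UpsertClient,
+	reqs []*payload.Upsert_Request, batchSize int,
+) error {
+	if batchSize <= 0 {
+		batchSize = 100 // fallback batch size; tune it for your payload size
+	}
+	for start := 0; start < len(reqs); start += batchSize {
+		end := start + batchSize
+		if end > len(reqs) {
+			end = len(reqs)
+		}
+		locs, err := client.MultiUpsert(ctx, &payload.Upsert_MultiRequest{
+			Requests: reqs[start:end],
+		})
+		if err != nil {
+			return err
+		}
+		log.Printf("upserted %d vectors in this batch", len(locs.GetLocations()))
+	}
+	return nil
+}
+```
+
+A suitable batch size depends on the vector dimension and on the configured gRPC maximum message size (commonly 4 MB by default, though your cluster configuration may differ), so treat the value above as a placeholder.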
+ +### Input + +- the scheme of `payload.v1.Upsert.MultiRequest` + + ```rpc + message Upsert.MultiRequest { + repeated Upsert.Request requests = 1; + } + + message Upsert.Request { + Object.Vector vector = 1; + Upsert.Config config = 2; + } + + message Object.Vector { + string id = 1; + repeated float vector = 2; + int64 timestamp = 3; + } + + message Upsert.Config { + bool skip_strict_exist_check = 1; + Filter.Config filters = 2; + int64 timestamp = 3; + bool disable_balanced_update = 4; + } + + message Filter.Config { + repeated Filter.Target targets = 1; + } + + message Filter.Target { + string host = 1; + uint32 port = 2; + } + + ``` + + - Upsert.MultiRequest + + | field | type | label | description | + | :------: | :------------- | :------- | :--------------------------------------------- | + | requests | Upsert.Request | repeated | Represent the multiple upsert request content. | + + - Upsert.Request + + | field | type | label | description | + | :----: | :------------ | :---- | :--------------------------------------- | + | vector | Object.Vector | | The vector to be upserted. | + | config | Upsert.Config | | The configuration of the upsert request. | + + - Object.Vector + + | field | type | label | description | + | :-------: | :----- | :------- | :---------------------------------------------- | + | id | string | | The vector ID. | + | vector | float | repeated | The vector. | + | timestamp | int64 | | timestamp represents when this vector inserted. | + + - Upsert.Config + + | field | type | label | description | + | :---: | :--- | :---- | :---------- | + | skip_strict_exist_check | bool | | A flag to skip exist check during upsert operation. | + | filters | Filter.Config | | Filter configuration. | + | timestamp | int64 | | Upsert timestamp. | + | disable_balanced_update | bool | | A flag to disable balanced update (split remove -> insert operation) + + during update operation. | + + - Filter.Config + + | field | type | label | description | + | :-----: | :------------ | :------- | :----------------------------------------- | + | targets | Filter.Target | repeated | Represent the filter target configuration. | + + - Filter.Target + + | field | type | label | description | + | :---: | :----- | :---- | :------------------- | + | host | string | | The target hostname. | + | port | uint32 | | The target port. | + +### Output + +- the scheme of `payload.v1.Object.Locations` + + ```rpc + message Object.Locations { + repeated Object.Location locations = 1; + } + + message Object.Location { + string name = 1; + string uuid = 2; + repeated string ips = 3; + } + + ``` + + - Object.Locations + + | field | type | label | description | + | :-------: | :-------------- | :------- | :---------- | + | locations | Object.Location | repeated | | + + - Object.Location + + | field | type | label | description | + | :---: | :----- | :------- | :------------------------ | + | name | string | | The name of the location. | + | uuid | string | | The UUID of the vector. | + | ips | string | repeated | The IP list. | + +### Status Code + +| code | description | +| :--: | :---------------- | +| 0 | OK | +| 1 | CANCELLED | +| 3 | INVALID_ARGUMENT | +| 4 | DEADLINE_EXCEEDED | +| 5 | NOT_FOUND | +| 6 | ALREADY_EXISTS | +| 10 | ABORTED | +| 13 | INTERNAL | + +Please refer to [Response Status Code](../status.md) for more details. + +### Troubleshooting + +The request process may not be completed when the response code is NOT `0 (OK)`. + +Here are some common reasons and how to resolve each error. 
+ +| name | common reason | how to resolve | +| :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | +| CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | +| INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | +| DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | +| ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | +| INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | diff --git a/apis/grpc/v1/agent/core/agent.pb.go b/apis/grpc/v1/agent/core/agent.pb.go index d1218c8a0b..da5098e660 100644 --- a/apis/grpc/v1/agent/core/agent.pb.go +++ b/apis/grpc/v1/agent/core/agent.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/agent/core/agent.proto diff --git a/apis/grpc/v1/agent/sidecar/sidecar.pb.go b/apis/grpc/v1/agent/sidecar/sidecar.pb.go index b2753d8f9c..6aa44f4fca 100644 --- a/apis/grpc/v1/agent/sidecar/sidecar.pb.go +++ b/apis/grpc/v1/agent/sidecar/sidecar.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/agent/sidecar/sidecar.proto diff --git a/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go b/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go index 91d291495c..04d744357f 100644 --- a/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go +++ b/apis/grpc/v1/agent/sidecar/sidecar_vtproto.pb.go @@ -36,7 +36,7 @@ const _ = grpc.SupportPackageIsVersion7 // SidecarClient is the client API for Sidecar service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type SidecarClient interface{} +type SidecarClient any type sidecarClient struct { cc grpc.ClientConnInterface diff --git a/apis/grpc/v1/discoverer/discoverer.pb.go b/apis/grpc/v1/discoverer/discoverer.pb.go index 0cc5631b6c..75aff7e636 100644 --- a/apis/grpc/v1/discoverer/discoverer.pb.go +++ b/apis/grpc/v1/discoverer/discoverer.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/discoverer/discoverer.proto diff --git a/apis/grpc/v1/filter/egress/egress_filter.pb.go b/apis/grpc/v1/filter/egress/egress_filter.pb.go index aad0e7225f..3e877f7b94 100644 --- a/apis/grpc/v1/filter/egress/egress_filter.pb.go +++ b/apis/grpc/v1/filter/egress/egress_filter.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/filter/egress/egress_filter.proto diff --git a/apis/grpc/v1/filter/ingress/ingress_filter.pb.go b/apis/grpc/v1/filter/ingress/ingress_filter.pb.go index 6a53d585e1..1c1e12a425 100644 --- a/apis/grpc/v1/filter/ingress/ingress_filter.pb.go +++ b/apis/grpc/v1/filter/ingress/ingress_filter.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/filter/ingress/ingress_filter.proto diff --git a/apis/grpc/v1/meta/meta.pb.go b/apis/grpc/v1/meta/meta.pb.go index 8c60e8d365..a8bf475441 100644 --- a/apis/grpc/v1/meta/meta.pb.go +++ b/apis/grpc/v1/meta/meta.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/meta/meta.proto diff --git a/apis/grpc/v1/mirror/mirror.pb.go b/apis/grpc/v1/mirror/mirror.pb.go index 62b22acc2a..2319140c0d 100644 --- a/apis/grpc/v1/mirror/mirror.pb.go +++ b/apis/grpc/v1/mirror/mirror.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/mirror/mirror.proto diff --git a/apis/grpc/v1/payload/payload.pb.go b/apis/grpc/v1/payload/payload.pb.go index 285d254b68..739393fbf5 100644 --- a/apis/grpc/v1/payload/payload.pb.go +++ b/apis/grpc/v1/payload/payload.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/payload/payload.proto @@ -166,9 +166,9 @@ func (Remove_Timestamp_Operator) EnumDescriptor() ([]byte, []int) { // Search related messages. type Search struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Search) Reset() { @@ -203,9 +203,9 @@ func (*Search) Descriptor() ([]byte, []int) { // Filter related messages. type Filter struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Filter) Reset() { @@ -240,9 +240,9 @@ func (*Filter) Descriptor() ([]byte, []int) { // Insert related messages. type Insert struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Insert) Reset() { @@ -277,9 +277,9 @@ func (*Insert) Descriptor() ([]byte, []int) { // Update related messages type Update struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Update) Reset() { @@ -314,9 +314,9 @@ func (*Update) Descriptor() ([]byte, []int) { // Upsert related messages. type Upsert struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Upsert) Reset() { @@ -351,9 +351,9 @@ func (*Upsert) Descriptor() ([]byte, []int) { // Remove related messages. 
type Remove struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Remove) Reset() { @@ -388,9 +388,9 @@ func (*Remove) Descriptor() ([]byte, []int) { // Flush related messages. type Flush struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Flush) Reset() { @@ -425,9 +425,9 @@ func (*Flush) Descriptor() ([]byte, []int) { // Common messages. type Object struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Object) Reset() { @@ -462,9 +462,9 @@ func (*Object) Descriptor() ([]byte, []int) { // Control related messages. type Control struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Control) Reset() { @@ -499,9 +499,9 @@ func (*Control) Descriptor() ([]byte, []int) { // Discoverer related messages. type Discoverer struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Discoverer) Reset() { @@ -536,9 +536,9 @@ func (*Discoverer) Descriptor() ([]byte, []int) { // Info related messages. type Info struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Info) Reset() { @@ -573,9 +573,9 @@ func (*Info) Descriptor() ([]byte, []int) { // Mirror related messages. type Mirror struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Mirror) Reset() { @@ -609,9 +609,9 @@ func (*Mirror) Descriptor() ([]byte, []int) { } type Meta struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Meta) Reset() { @@ -646,9 +646,9 @@ func (*Meta) Descriptor() ([]byte, []int) { // Represent an empty message. type Empty struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Empty) Reset() { @@ -683,13 +683,14 @@ func (*Empty) Descriptor() ([]byte, []int) { // Represent a search request. type Search_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector to be searched. - Vector []float32 ` protobuf:"fixed32,1,rep,packed,name=vector,proto3" json:"vector,omitempty"` + Vector []float32 `protobuf:"fixed32,1,rep,packed,name=vector,proto3" json:"vector,omitempty"` // The configuration of the search request. 
- Config *Search_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Search_Request) Reset() { @@ -738,11 +739,12 @@ func (x *Search_Request) GetConfig() *Search_Config { // Represent the multiple search request. type Search_MultiRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple search request content. - Requests []*Search_Request ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple search request content. + Requests []*Search_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Search_MultiRequest) Reset() { @@ -784,13 +786,14 @@ func (x *Search_MultiRequest) GetRequests() []*Search_Request { // Represent a search by ID request. type Search_IDRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector ID to be searched. - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The configuration of the search request. - Config *Search_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Search_IDRequest) Reset() { @@ -839,11 +842,12 @@ func (x *Search_IDRequest) GetConfig() *Search_Config { // Represent the multiple search by ID request. type Search_MultiIDRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple search by ID request content. - Requests []*Search_IDRequest ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple search by ID request content. + Requests []*Search_IDRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Search_MultiIDRequest) Reset() { @@ -885,15 +889,16 @@ func (x *Search_MultiIDRequest) GetRequests() []*Search_IDRequest { // Represent a search by binary object request. type Search_ObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The binary object to be searched. - Object []byte ` protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object []byte `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` // The configuration of the search request. - Config *Search_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Config *Search_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Filter configuration. 
- Vectorizer *Filter_Target ` protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Vectorizer *Filter_Target `protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` } func (x *Search_ObjectRequest) Reset() { @@ -949,11 +954,12 @@ func (x *Search_ObjectRequest) GetVectorizer() *Filter_Target { // Represent the multiple search by binary object request. type Search_MultiObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple search by binary object request content. - Requests []*Search_ObjectRequest ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple search by binary object request content. + Requests []*Search_ObjectRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Search_MultiObjectRequest) Reset() { @@ -995,31 +1001,32 @@ func (x *Search_MultiObjectRequest) GetRequests() []*Search_ObjectRequest { // Represent search configuration. type Search_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Unique request ID. - RequestId string ` protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` // Maximum number of result to be returned. - Num uint32 ` protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"` + Num uint32 `protobuf:"varint,2,opt,name=num,proto3" json:"num,omitempty"` // Search radius. - Radius float32 ` protobuf:"fixed32,3,opt,name=radius,proto3" json:"radius,omitempty"` + Radius float32 `protobuf:"fixed32,3,opt,name=radius,proto3" json:"radius,omitempty"` // Search coefficient. - Epsilon float32 ` protobuf:"fixed32,4,opt,name=epsilon,proto3" json:"epsilon,omitempty"` + Epsilon float32 `protobuf:"fixed32,4,opt,name=epsilon,proto3" json:"epsilon,omitempty"` // Search timeout in nanoseconds. - Timeout int64 ` protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` + Timeout int64 `protobuf:"varint,5,opt,name=timeout,proto3" json:"timeout,omitempty"` // Ingress filter configurations. - IngressFilters *Filter_Config ` protobuf:"bytes,6,opt,name=ingress_filters,json=ingressFilters,proto3" json:"ingress_filters,omitempty"` + IngressFilters *Filter_Config `protobuf:"bytes,6,opt,name=ingress_filters,json=ingressFilters,proto3" json:"ingress_filters,omitempty"` // Egress filter configurations. - EgressFilters *Filter_Config ` protobuf:"bytes,7,opt,name=egress_filters,json=egressFilters,proto3" json:"egress_filters,omitempty"` + EgressFilters *Filter_Config `protobuf:"bytes,7,opt,name=egress_filters,json=egressFilters,proto3" json:"egress_filters,omitempty"` // Minimum number of result to be returned. 
- MinNum uint32 ` protobuf:"varint,8,opt,name=min_num,json=minNum,proto3" json:"min_num,omitempty"` + MinNum uint32 `protobuf:"varint,8,opt,name=min_num,json=minNum,proto3" json:"min_num,omitempty"` // Aggregation Algorithm - AggregationAlgorithm Search_AggregationAlgorithm ` protobuf:"varint,9,opt,name=aggregation_algorithm,json=aggregationAlgorithm,proto3,enum=payload.v1.Search_AggregationAlgorithm" json:"aggregation_algorithm,omitempty"` + AggregationAlgorithm Search_AggregationAlgorithm `protobuf:"varint,9,opt,name=aggregation_algorithm,json=aggregationAlgorithm,proto3,enum=payload.v1.Search_AggregationAlgorithm" json:"aggregation_algorithm,omitempty"` // Search ratio for agent return result number. - Ratio *wrapperspb.FloatValue ` protobuf:"bytes,10,opt,name=ratio,proto3" json:"ratio,omitempty"` + Ratio *wrapperspb.FloatValue `protobuf:"bytes,10,opt,name=ratio,proto3" json:"ratio,omitempty"` // Search nprobe. - Nprobe uint32 ` protobuf:"varint,11,opt,name=nprobe,proto3" json:"nprobe,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Nprobe uint32 `protobuf:"varint,11,opt,name=nprobe,proto3" json:"nprobe,omitempty"` } func (x *Search_Config) Reset() { @@ -1131,13 +1138,14 @@ func (x *Search_Config) GetNprobe() uint32 { // Represent a search response. type Search_Response struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The unique request ID. - RequestId string ` protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` // Search results. - Results []*Object_Distance ` protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Results []*Object_Distance `protobuf:"bytes,2,rep,name=results,proto3" json:"results,omitempty"` } func (x *Search_Response) Reset() { @@ -1186,11 +1194,12 @@ func (x *Search_Response) GetResults() []*Object_Distance { // Represent multiple search responses. type Search_Responses struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple search response content. - Responses []*Search_Response ` protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple search response content. + Responses []*Search_Response `protobuf:"bytes,1,rep,name=responses,proto3" json:"responses,omitempty"` } func (x *Search_Responses) Reset() { @@ -1232,14 +1241,15 @@ func (x *Search_Responses) GetResponses() []*Search_Response { // Represent stream search response. 
type Search_StreamResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Search_StreamResponse_Response // *Search_StreamResponse_Status - Payload isSearch_StreamResponse_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Payload isSearch_StreamResponse_Payload `protobuf_oneof:"payload"` } func (x *Search_StreamResponse) Reset() { @@ -1272,27 +1282,23 @@ func (*Search_StreamResponse) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{0, 9} } -func (x *Search_StreamResponse) GetPayload() isSearch_StreamResponse_Payload { - if x != nil { - return x.Payload +func (m *Search_StreamResponse) GetPayload() isSearch_StreamResponse_Payload { + if m != nil { + return m.Payload } return nil } func (x *Search_StreamResponse) GetResponse() *Search_Response { - if x != nil { - if x, ok := x.Payload.(*Search_StreamResponse_Response); ok { - return x.Response - } + if x, ok := x.GetPayload().(*Search_StreamResponse_Response); ok { + return x.Response } return nil } func (x *Search_StreamResponse) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Search_StreamResponse_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Search_StreamResponse_Status); ok { + return x.Status } return nil } @@ -1317,13 +1323,14 @@ func (*Search_StreamResponse_Status) isSearch_StreamResponse_Payload() {} // Represent the target filter server. type Filter_Target struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The target hostname. - Host string ` protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // The target port. - Port uint32 ` protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } func (x *Filter_Target) Reset() { @@ -1372,11 +1379,12 @@ func (x *Filter_Target) GetPort() uint32 { // Represent filter configuration. type Filter_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the filter target configuration. - Targets []*Filter_Target ` protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the filter target configuration. + Targets []*Filter_Target `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` } func (x *Filter_Config) Reset() { @@ -1418,13 +1426,14 @@ func (x *Filter_Config) GetTargets() []*Filter_Target { // Represent the insert request. type Insert_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector to be inserted. - Vector *Object_Vector ` protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` // The configuration of the insert request. 
- Config *Insert_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Insert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Insert_Request) Reset() { @@ -1473,11 +1482,12 @@ func (x *Insert_Request) GetConfig() *Insert_Config { // Represent the multiple insert request. type Insert_MultiRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent multiple insert request content. - Requests []*Insert_Request ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent multiple insert request content. + Requests []*Insert_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Insert_MultiRequest) Reset() { @@ -1519,15 +1529,16 @@ func (x *Insert_MultiRequest) GetRequests() []*Insert_Request { // Represent the insert by binary object request. type Insert_ObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The binary object to be inserted. - Object *Object_Blob ` protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object *Object_Blob `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` // The configuration of the insert request. - Config *Insert_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Config *Insert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Filter configurations. - Vectorizer *Filter_Target ` protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Vectorizer *Filter_Target `protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` } func (x *Insert_ObjectRequest) Reset() { @@ -1583,11 +1594,12 @@ func (x *Insert_ObjectRequest) GetVectorizer() *Filter_Target { // Represent the multiple insert by binary object request. type Insert_MultiObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent multiple insert by object content. - Requests []*Insert_ObjectRequest ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent multiple insert by object content. + Requests []*Insert_ObjectRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Insert_MultiObjectRequest) Reset() { @@ -1629,15 +1641,16 @@ func (x *Insert_MultiObjectRequest) GetRequests() []*Insert_ObjectRequest { // Represent insert configurations. type Insert_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A flag to skip exist check during insert operation. 
- SkipStrictExistCheck bool ` protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` // Filter configurations. - Filters *Filter_Config ` protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` // Insert timestamp. - Timestamp int64 ` protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (x *Insert_Config) Reset() { @@ -1693,13 +1706,14 @@ func (x *Insert_Config) GetTimestamp() int64 { // Represent the update request. type Update_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector to be updated. - Vector *Object_Vector ` protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` // The configuration of the update request. - Config *Update_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Update_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Update_Request) Reset() { @@ -1748,11 +1762,12 @@ func (x *Update_Request) GetConfig() *Update_Config { // Represent the multiple update request. type Update_MultiRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple update request content. - Requests []*Update_Request ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple update request content. + Requests []*Update_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Update_MultiRequest) Reset() { @@ -1794,15 +1809,16 @@ func (x *Update_MultiRequest) GetRequests() []*Update_Request { // Represent the update binary object request. type Update_ObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The binary object to be updated. - Object *Object_Blob ` protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object *Object_Blob `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` // The configuration of the update request. - Config *Update_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Config *Update_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Filter target. 
- Vectorizer *Filter_Target ` protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Vectorizer *Filter_Target `protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` } func (x *Update_ObjectRequest) Reset() { @@ -1858,11 +1874,12 @@ func (x *Update_ObjectRequest) GetVectorizer() *Filter_Target { // Represent the multiple update binary object request. type Update_MultiObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple update object request content. - Requests []*Update_ObjectRequest ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple update object request content. + Requests []*Update_ObjectRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Update_MultiObjectRequest) Reset() { @@ -1902,25 +1919,91 @@ func (x *Update_MultiObjectRequest) GetRequests() []*Update_ObjectRequest { return nil } +// Represent a vector meta data. +type Update_TimestampRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The vector ID. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // timestamp represents when this vector inserted. + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + // force represents forcefully update the timestamp. + Force bool `protobuf:"varint,3,opt,name=force,proto3" json:"force,omitempty"` +} + +func (x *Update_TimestampRequest) Reset() { + *x = Update_TimestampRequest{} + mi := &file_v1_payload_payload_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *Update_TimestampRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Update_TimestampRequest) ProtoMessage() {} + +func (x *Update_TimestampRequest) ProtoReflect() protoreflect.Message { + mi := &file_v1_payload_payload_proto_msgTypes[35] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Update_TimestampRequest.ProtoReflect.Descriptor instead. +func (*Update_TimestampRequest) Descriptor() ([]byte, []int) { + return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 4} +} + +func (x *Update_TimestampRequest) GetId() string { + if x != nil { + return x.Id + } + return "" +} + +func (x *Update_TimestampRequest) GetTimestamp() int64 { + if x != nil { + return x.Timestamp + } + return 0 +} + +func (x *Update_TimestampRequest) GetForce() bool { + if x != nil { + return x.Force + } + return false +} + // Represent the update configuration. type Update_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A flag to skip exist check during update operation. 
- SkipStrictExistCheck bool ` protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` // Filter configuration. - Filters *Filter_Config ` protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` // Update timestamp. - Timestamp int64 ` protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // A flag to disable balanced update (split remove -> insert operation) // during update operation. - DisableBalancedUpdate bool ` protobuf:"varint,4,opt,name=disable_balanced_update,json=disableBalancedUpdate,proto3" json:"disable_balanced_update,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + DisableBalancedUpdate bool `protobuf:"varint,4,opt,name=disable_balanced_update,json=disableBalancedUpdate,proto3" json:"disable_balanced_update,omitempty"` } func (x *Update_Config) Reset() { *x = Update_Config{} - mi := &file_v1_payload_payload_proto_msgTypes[35] + mi := &file_v1_payload_payload_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1932,7 +2015,7 @@ func (x *Update_Config) String() string { func (*Update_Config) ProtoMessage() {} func (x *Update_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[35] + mi := &file_v1_payload_payload_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1945,7 +2028,7 @@ func (x *Update_Config) ProtoReflect() protoreflect.Message { // Deprecated: Use Update_Config.ProtoReflect.Descriptor instead. func (*Update_Config) Descriptor() ([]byte, []int) { - return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 4} + return file_v1_payload_payload_proto_rawDescGZIP(), []int{3, 5} } func (x *Update_Config) GetSkipStrictExistCheck() bool { @@ -1978,18 +2061,19 @@ func (x *Update_Config) GetDisableBalancedUpdate() bool { // Represent the upsert request. type Upsert_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector to be upserted. - Vector *Object_Vector ` protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` + Vector *Object_Vector `protobuf:"bytes,1,opt,name=vector,proto3" json:"vector,omitempty"` // The configuration of the upsert request. 
- Config *Upsert_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Upsert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Upsert_Request) Reset() { *x = Upsert_Request{} - mi := &file_v1_payload_payload_proto_msgTypes[36] + mi := &file_v1_payload_payload_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2001,7 +2085,7 @@ func (x *Upsert_Request) String() string { func (*Upsert_Request) ProtoMessage() {} func (x *Upsert_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[36] + mi := &file_v1_payload_payload_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2033,16 +2117,17 @@ func (x *Upsert_Request) GetConfig() *Upsert_Config { // Represent mthe ultiple upsert request. type Upsert_MultiRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple upsert request content. - Requests []*Upsert_Request ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple upsert request content. + Requests []*Upsert_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Upsert_MultiRequest) Reset() { *x = Upsert_MultiRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[37] + mi := &file_v1_payload_payload_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2054,7 +2139,7 @@ func (x *Upsert_MultiRequest) String() string { func (*Upsert_MultiRequest) ProtoMessage() {} func (x *Upsert_MultiRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[37] + mi := &file_v1_payload_payload_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2079,20 +2164,21 @@ func (x *Upsert_MultiRequest) GetRequests() []*Upsert_Request { // Represent the upsert binary object request. type Upsert_ObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The binary object to be upserted. - Object *Object_Blob ` protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object *Object_Blob `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` // The configuration of the upsert request. - Config *Upsert_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` + Config *Upsert_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` // Filter target. 
- Vectorizer *Filter_Target ` protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Vectorizer *Filter_Target `protobuf:"bytes,3,opt,name=vectorizer,proto3" json:"vectorizer,omitempty"` } func (x *Upsert_ObjectRequest) Reset() { *x = Upsert_ObjectRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[38] + mi := &file_v1_payload_payload_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2104,7 +2190,7 @@ func (x *Upsert_ObjectRequest) String() string { func (*Upsert_ObjectRequest) ProtoMessage() {} func (x *Upsert_ObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[38] + mi := &file_v1_payload_payload_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2143,16 +2229,17 @@ func (x *Upsert_ObjectRequest) GetVectorizer() *Filter_Target { // Represent the multiple upsert binary object request. type Upsert_MultiObjectRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple upsert object request content. - Requests []*Upsert_ObjectRequest ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple upsert object request content. + Requests []*Upsert_ObjectRequest `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Upsert_MultiObjectRequest) Reset() { *x = Upsert_MultiObjectRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[39] + mi := &file_v1_payload_payload_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2164,7 +2251,7 @@ func (x *Upsert_MultiObjectRequest) String() string { func (*Upsert_MultiObjectRequest) ProtoMessage() {} func (x *Upsert_MultiObjectRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[39] + mi := &file_v1_payload_payload_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2189,23 +2276,24 @@ func (x *Upsert_MultiObjectRequest) GetRequests() []*Upsert_ObjectRequest { // Represent the upsert configuration. type Upsert_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A flag to skip exist check during upsert operation. - SkipStrictExistCheck bool ` protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` // Filter configuration. - Filters *Filter_Config ` protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` // Upsert timestamp. - Timestamp int64 ` protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // A flag to disable balanced update (split remove -> insert operation) // during update operation. 
- DisableBalancedUpdate bool ` protobuf:"varint,4,opt,name=disable_balanced_update,json=disableBalancedUpdate,proto3" json:"disable_balanced_update,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + DisableBalancedUpdate bool `protobuf:"varint,4,opt,name=disable_balanced_update,json=disableBalancedUpdate,proto3" json:"disable_balanced_update,omitempty"` } func (x *Upsert_Config) Reset() { *x = Upsert_Config{} - mi := &file_v1_payload_payload_proto_msgTypes[40] + mi := &file_v1_payload_payload_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2217,7 +2305,7 @@ func (x *Upsert_Config) String() string { func (*Upsert_Config) ProtoMessage() {} func (x *Upsert_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[40] + mi := &file_v1_payload_payload_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2263,18 +2351,19 @@ func (x *Upsert_Config) GetDisableBalancedUpdate() bool { // Represent the remove request. type Remove_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The object ID to be removed. - Id *Object_ID ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id *Object_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The configuration of the remove request. - Config *Remove_Config ` protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Config *Remove_Config `protobuf:"bytes,2,opt,name=config,proto3" json:"config,omitempty"` } func (x *Remove_Request) Reset() { *x = Remove_Request{} - mi := &file_v1_payload_payload_proto_msgTypes[41] + mi := &file_v1_payload_payload_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2286,7 +2375,7 @@ func (x *Remove_Request) String() string { func (*Remove_Request) ProtoMessage() {} func (x *Remove_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[41] + mi := &file_v1_payload_payload_proto_msgTypes[42] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2318,16 +2407,17 @@ func (x *Remove_Request) GetConfig() *Remove_Config { // Represent the multiple remove request. type Remove_MultiRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Represent the multiple remove request content. - Requests []*Remove_Request ` protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represent the multiple remove request content. 
+ Requests []*Remove_Request `protobuf:"bytes,1,rep,name=requests,proto3" json:"requests,omitempty"` } func (x *Remove_MultiRequest) Reset() { *x = Remove_MultiRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[42] + mi := &file_v1_payload_payload_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2339,7 +2429,7 @@ func (x *Remove_MultiRequest) String() string { func (*Remove_MultiRequest) ProtoMessage() {} func (x *Remove_MultiRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[42] + mi := &file_v1_payload_payload_proto_msgTypes[43] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2364,17 +2454,18 @@ func (x *Remove_MultiRequest) GetRequests() []*Remove_Request { // Represent the remove request based on timestamp. type Remove_TimestampRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The timestamp comparison list. If more than one is specified, the `AND` // search is applied. - Timestamps []*Remove_Timestamp ` protobuf:"bytes,1,rep,name=timestamps,proto3" json:"timestamps,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Timestamps []*Remove_Timestamp `protobuf:"bytes,1,rep,name=timestamps,proto3" json:"timestamps,omitempty"` } func (x *Remove_TimestampRequest) Reset() { *x = Remove_TimestampRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[43] + mi := &file_v1_payload_payload_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2386,7 +2477,7 @@ func (x *Remove_TimestampRequest) String() string { func (*Remove_TimestampRequest) ProtoMessage() {} func (x *Remove_TimestampRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[43] + mi := &file_v1_payload_payload_proto_msgTypes[44] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2411,18 +2502,19 @@ func (x *Remove_TimestampRequest) GetTimestamps() []*Remove_Timestamp { // Represent the timestamp comparison. type Remove_Timestamp struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The timestamp. - Timestamp int64 ` protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Timestamp int64 `protobuf:"varint,1,opt,name=timestamp,proto3" json:"timestamp,omitempty"` // The conditional operator. 
- Operator Remove_Timestamp_Operator ` protobuf:"varint,2,opt,name=operator,proto3,enum=payload.v1.Remove_Timestamp_Operator" json:"operator,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Operator Remove_Timestamp_Operator `protobuf:"varint,2,opt,name=operator,proto3,enum=payload.v1.Remove_Timestamp_Operator" json:"operator,omitempty"` } func (x *Remove_Timestamp) Reset() { *x = Remove_Timestamp{} - mi := &file_v1_payload_payload_proto_msgTypes[44] + mi := &file_v1_payload_payload_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2434,7 +2526,7 @@ func (x *Remove_Timestamp) String() string { func (*Remove_Timestamp) ProtoMessage() {} func (x *Remove_Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[44] + mi := &file_v1_payload_payload_proto_msgTypes[45] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2466,18 +2558,19 @@ func (x *Remove_Timestamp) GetOperator() Remove_Timestamp_Operator { // Represent the remove configuration. type Remove_Config struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A flag to skip exist check during upsert operation. - SkipStrictExistCheck bool ` protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` + SkipStrictExistCheck bool `protobuf:"varint,1,opt,name=skip_strict_exist_check,json=skipStrictExistCheck,proto3" json:"skip_strict_exist_check,omitempty"` // Remove timestamp. - Timestamp int64 ` protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (x *Remove_Config) Reset() { *x = Remove_Config{} - mi := &file_v1_payload_payload_proto_msgTypes[45] + mi := &file_v1_payload_payload_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2489,7 +2582,7 @@ func (x *Remove_Config) String() string { func (*Remove_Config) ProtoMessage() {} func (x *Remove_Config) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[45] + mi := &file_v1_payload_payload_proto_msgTypes[46] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2520,14 +2613,14 @@ func (x *Remove_Config) GetTimestamp() int64 { } type Flush_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Flush_Request) Reset() { *x = Flush_Request{} - mi := &file_v1_payload_payload_proto_msgTypes[46] + mi := &file_v1_payload_payload_proto_msgTypes[47] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2539,7 +2632,7 @@ func (x *Flush_Request) String() string { func (*Flush_Request) ProtoMessage() {} func (x *Flush_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[46] + mi := &file_v1_payload_payload_proto_msgTypes[47] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2557,18 +2650,19 @@ func (*Flush_Request) Descriptor() 
([]byte, []int) { // Represent a request to fetch raw vector. type Object_VectorRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector ID to be fetched. - Id *Object_ID ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id *Object_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Filter configurations. - Filters *Filter_Config ` protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Filters *Filter_Config `protobuf:"bytes,2,opt,name=filters,proto3" json:"filters,omitempty"` } func (x *Object_VectorRequest) Reset() { *x = Object_VectorRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[47] + mi := &file_v1_payload_payload_proto_msgTypes[48] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2580,7 +2674,7 @@ func (x *Object_VectorRequest) String() string { func (*Object_VectorRequest) ProtoMessage() {} func (x *Object_VectorRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[47] + mi := &file_v1_payload_payload_proto_msgTypes[48] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2612,18 +2706,19 @@ func (x *Object_VectorRequest) GetFilters() *Filter_Config { // Represent the ID and distance pair. type Object_Distance struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector ID. - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The distance. - Distance float32 ` protobuf:"fixed32,2,opt,name=distance,proto3" json:"distance,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Distance float32 `protobuf:"fixed32,2,opt,name=distance,proto3" json:"distance,omitempty"` } func (x *Object_Distance) Reset() { *x = Object_Distance{} - mi := &file_v1_payload_payload_proto_msgTypes[48] + mi := &file_v1_payload_payload_proto_msgTypes[49] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2635,7 +2730,7 @@ func (x *Object_Distance) String() string { func (*Object_Distance) ProtoMessage() {} func (x *Object_Distance) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[48] + mi := &file_v1_payload_payload_proto_msgTypes[49] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2667,19 +2762,20 @@ func (x *Object_Distance) GetDistance() float32 { // Represent stream response of distances. 
type Object_StreamDistance struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Object_StreamDistance_Distance // *Object_StreamDistance_Status - Payload isObject_StreamDistance_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Payload isObject_StreamDistance_Payload `protobuf_oneof:"payload"` } func (x *Object_StreamDistance) Reset() { *x = Object_StreamDistance{} - mi := &file_v1_payload_payload_proto_msgTypes[49] + mi := &file_v1_payload_payload_proto_msgTypes[50] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2691,7 +2787,7 @@ func (x *Object_StreamDistance) String() string { func (*Object_StreamDistance) ProtoMessage() {} func (x *Object_StreamDistance) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[49] + mi := &file_v1_payload_payload_proto_msgTypes[50] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2707,27 +2803,23 @@ func (*Object_StreamDistance) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{7, 2} } -func (x *Object_StreamDistance) GetPayload() isObject_StreamDistance_Payload { - if x != nil { - return x.Payload +func (m *Object_StreamDistance) GetPayload() isObject_StreamDistance_Payload { + if m != nil { + return m.Payload } return nil } func (x *Object_StreamDistance) GetDistance() *Object_Distance { - if x != nil { - if x, ok := x.Payload.(*Object_StreamDistance_Distance); ok { - return x.Distance - } + if x, ok := x.GetPayload().(*Object_StreamDistance_Distance); ok { + return x.Distance } return nil } func (x *Object_StreamDistance) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Object_StreamDistance_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Object_StreamDistance_Status); ok { + return x.Status } return nil } @@ -2752,15 +2844,16 @@ func (*Object_StreamDistance_Status) isObject_StreamDistance_Payload() {} // Represent the vector ID. type Object_ID struct { - state protoimpl.MessageState `protogen:"open.v1"` - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } func (x *Object_ID) Reset() { *x = Object_ID{} - mi := &file_v1_payload_payload_proto_msgTypes[50] + mi := &file_v1_payload_payload_proto_msgTypes[51] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2772,7 +2865,7 @@ func (x *Object_ID) String() string { func (*Object_ID) ProtoMessage() {} func (x *Object_ID) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[50] + mi := &file_v1_payload_payload_proto_msgTypes[51] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2797,15 +2890,16 @@ func (x *Object_ID) GetId() string { // Represent multiple vector IDs. 
type Object_IDs struct { - state protoimpl.MessageState `protogen:"open.v1"` - Ids []string ` protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ids []string `protobuf:"bytes,1,rep,name=ids,proto3" json:"ids,omitempty"` } func (x *Object_IDs) Reset() { *x = Object_IDs{} - mi := &file_v1_payload_payload_proto_msgTypes[51] + mi := &file_v1_payload_payload_proto_msgTypes[52] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2817,7 +2911,7 @@ func (x *Object_IDs) String() string { func (*Object_IDs) ProtoMessage() {} func (x *Object_IDs) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[51] + mi := &file_v1_payload_payload_proto_msgTypes[52] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2842,20 +2936,21 @@ func (x *Object_IDs) GetIds() []string { // Represent a vector. type Object_Vector struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector ID. - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The vector. - Vector []float32 ` protobuf:"fixed32,2,rep,packed,name=vector,proto3" json:"vector,omitempty"` + Vector []float32 `protobuf:"fixed32,2,rep,packed,name=vector,proto3" json:"vector,omitempty"` // timestamp represents when this vector inserted. - Timestamp int64 ` protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Timestamp int64 `protobuf:"varint,3,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (x *Object_Vector) Reset() { *x = Object_Vector{} - mi := &file_v1_payload_payload_proto_msgTypes[52] + mi := &file_v1_payload_payload_proto_msgTypes[53] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2867,7 +2962,7 @@ func (x *Object_Vector) String() string { func (*Object_Vector) ProtoMessage() {} func (x *Object_Vector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[52] + mi := &file_v1_payload_payload_proto_msgTypes[53] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2906,16 +3001,17 @@ func (x *Object_Vector) GetTimestamp() int64 { // Represent a request to fetch vector meta data. type Object_TimestampRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The vector ID to be fetched. - Id *Object_ID ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The vector ID to be fetched. 
+ Id *Object_ID `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` } func (x *Object_TimestampRequest) Reset() { *x = Object_TimestampRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[53] + mi := &file_v1_payload_payload_proto_msgTypes[54] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2927,7 +3023,7 @@ func (x *Object_TimestampRequest) String() string { func (*Object_TimestampRequest) ProtoMessage() {} func (x *Object_TimestampRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[53] + mi := &file_v1_payload_payload_proto_msgTypes[54] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -2952,18 +3048,19 @@ func (x *Object_TimestampRequest) GetId() *Object_ID { // Represent a vector meta data. type Object_Timestamp struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The vector ID. - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // timestamp represents when this vector inserted. - Timestamp int64 ` protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` } func (x *Object_Timestamp) Reset() { *x = Object_Timestamp{} - mi := &file_v1_payload_payload_proto_msgTypes[54] + mi := &file_v1_payload_payload_proto_msgTypes[55] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -2975,7 +3072,7 @@ func (x *Object_Timestamp) String() string { func (*Object_Timestamp) ProtoMessage() {} func (x *Object_Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[54] + mi := &file_v1_payload_payload_proto_msgTypes[55] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3007,15 +3104,16 @@ func (x *Object_Timestamp) GetTimestamp() int64 { // Represent multiple vectors. type Object_Vectors struct { - state protoimpl.MessageState `protogen:"open.v1"` - Vectors []*Object_Vector ` protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Vectors []*Object_Vector `protobuf:"bytes,1,rep,name=vectors,proto3" json:"vectors,omitempty"` } func (x *Object_Vectors) Reset() { *x = Object_Vectors{} - mi := &file_v1_payload_payload_proto_msgTypes[55] + mi := &file_v1_payload_payload_proto_msgTypes[56] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3027,7 +3125,7 @@ func (x *Object_Vectors) String() string { func (*Object_Vectors) ProtoMessage() {} func (x *Object_Vectors) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[55] + mi := &file_v1_payload_payload_proto_msgTypes[56] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3052,19 +3150,20 @@ func (x *Object_Vectors) GetVectors() []*Object_Vector { // Represent stream response of the vector. 
type Object_StreamVector struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Object_StreamVector_Vector // *Object_StreamVector_Status - Payload isObject_StreamVector_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Payload isObject_StreamVector_Payload `protobuf_oneof:"payload"` } func (x *Object_StreamVector) Reset() { *x = Object_StreamVector{} - mi := &file_v1_payload_payload_proto_msgTypes[56] + mi := &file_v1_payload_payload_proto_msgTypes[57] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3076,7 +3175,7 @@ func (x *Object_StreamVector) String() string { func (*Object_StreamVector) ProtoMessage() {} func (x *Object_StreamVector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[56] + mi := &file_v1_payload_payload_proto_msgTypes[57] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3092,27 +3191,23 @@ func (*Object_StreamVector) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{7, 9} } -func (x *Object_StreamVector) GetPayload() isObject_StreamVector_Payload { - if x != nil { - return x.Payload +func (m *Object_StreamVector) GetPayload() isObject_StreamVector_Payload { + if m != nil { + return m.Payload } return nil } func (x *Object_StreamVector) GetVector() *Object_Vector { - if x != nil { - if x, ok := x.Payload.(*Object_StreamVector_Vector); ok { - return x.Vector - } + if x, ok := x.GetPayload().(*Object_StreamVector_Vector); ok { + return x.Vector } return nil } func (x *Object_StreamVector) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Object_StreamVector_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Object_StreamVector_Status); ok { + return x.Status } return nil } @@ -3137,18 +3232,19 @@ func (*Object_StreamVector_Status) isObject_StreamVector_Payload() {} // Represent reshape vector. type Object_ReshapeVector struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The binary object. - Object []byte ` protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` + Object []byte `protobuf:"bytes,1,opt,name=object,proto3" json:"object,omitempty"` // The new shape. 
- Shape []int32 ` protobuf:"varint,2,rep,packed,name=shape,proto3" json:"shape,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Shape []int32 `protobuf:"varint,2,rep,packed,name=shape,proto3" json:"shape,omitempty"` } func (x *Object_ReshapeVector) Reset() { *x = Object_ReshapeVector{} - mi := &file_v1_payload_payload_proto_msgTypes[57] + mi := &file_v1_payload_payload_proto_msgTypes[58] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3160,7 +3256,7 @@ func (x *Object_ReshapeVector) String() string { func (*Object_ReshapeVector) ProtoMessage() {} func (x *Object_ReshapeVector) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[57] + mi := &file_v1_payload_payload_proto_msgTypes[58] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3192,18 +3288,19 @@ func (x *Object_ReshapeVector) GetShape() []int32 { // Represent the binary object. type Object_Blob struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The object ID. - Id string ` protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // The binary object. - Object []byte ` protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Object []byte `protobuf:"bytes,2,opt,name=object,proto3" json:"object,omitempty"` } func (x *Object_Blob) Reset() { *x = Object_Blob{} - mi := &file_v1_payload_payload_proto_msgTypes[58] + mi := &file_v1_payload_payload_proto_msgTypes[59] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3215,7 +3312,7 @@ func (x *Object_Blob) String() string { func (*Object_Blob) ProtoMessage() {} func (x *Object_Blob) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[58] + mi := &file_v1_payload_payload_proto_msgTypes[59] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3247,19 +3344,20 @@ func (x *Object_Blob) GetObject() []byte { // Represent stream response of binary objects. 
type Object_StreamBlob struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Object_StreamBlob_Blob // *Object_StreamBlob_Status - Payload isObject_StreamBlob_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Payload isObject_StreamBlob_Payload `protobuf_oneof:"payload"` } func (x *Object_StreamBlob) Reset() { *x = Object_StreamBlob{} - mi := &file_v1_payload_payload_proto_msgTypes[59] + mi := &file_v1_payload_payload_proto_msgTypes[60] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3271,7 +3369,7 @@ func (x *Object_StreamBlob) String() string { func (*Object_StreamBlob) ProtoMessage() {} func (x *Object_StreamBlob) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[59] + mi := &file_v1_payload_payload_proto_msgTypes[60] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3287,27 +3385,23 @@ func (*Object_StreamBlob) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{7, 12} } -func (x *Object_StreamBlob) GetPayload() isObject_StreamBlob_Payload { - if x != nil { - return x.Payload +func (m *Object_StreamBlob) GetPayload() isObject_StreamBlob_Payload { + if m != nil { + return m.Payload } return nil } func (x *Object_StreamBlob) GetBlob() *Object_Blob { - if x != nil { - if x, ok := x.Payload.(*Object_StreamBlob_Blob); ok { - return x.Blob - } + if x, ok := x.GetPayload().(*Object_StreamBlob_Blob); ok { + return x.Blob } return nil } func (x *Object_StreamBlob) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Object_StreamBlob_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Object_StreamBlob_Status); ok { + return x.Status } return nil } @@ -3332,20 +3426,21 @@ func (*Object_StreamBlob_Status) isObject_StreamBlob_Payload() {} // Represent the vector location. type Object_Location struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the location. - Name string ` protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The UUID of the vector. - Uuid string ` protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` + Uuid string `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"` // The IP list. 
- Ips []string ` protobuf:"bytes,3,rep,name=ips,proto3" json:"ips,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Ips []string `protobuf:"bytes,3,rep,name=ips,proto3" json:"ips,omitempty"` } func (x *Object_Location) Reset() { *x = Object_Location{} - mi := &file_v1_payload_payload_proto_msgTypes[60] + mi := &file_v1_payload_payload_proto_msgTypes[61] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3357,7 +3452,7 @@ func (x *Object_Location) String() string { func (*Object_Location) ProtoMessage() {} func (x *Object_Location) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[60] + mi := &file_v1_payload_payload_proto_msgTypes[61] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3396,19 +3491,20 @@ func (x *Object_Location) GetIps() []string { // Represent the stream response of the vector location. type Object_StreamLocation struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Object_StreamLocation_Location // *Object_StreamLocation_Status - Payload isObject_StreamLocation_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Payload isObject_StreamLocation_Payload `protobuf_oneof:"payload"` } func (x *Object_StreamLocation) Reset() { *x = Object_StreamLocation{} - mi := &file_v1_payload_payload_proto_msgTypes[61] + mi := &file_v1_payload_payload_proto_msgTypes[62] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3420,7 +3516,7 @@ func (x *Object_StreamLocation) String() string { func (*Object_StreamLocation) ProtoMessage() {} func (x *Object_StreamLocation) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[61] + mi := &file_v1_payload_payload_proto_msgTypes[62] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3436,27 +3532,23 @@ func (*Object_StreamLocation) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{7, 14} } -func (x *Object_StreamLocation) GetPayload() isObject_StreamLocation_Payload { - if x != nil { - return x.Payload +func (m *Object_StreamLocation) GetPayload() isObject_StreamLocation_Payload { + if m != nil { + return m.Payload } return nil } func (x *Object_StreamLocation) GetLocation() *Object_Location { - if x != nil { - if x, ok := x.Payload.(*Object_StreamLocation_Location); ok { - return x.Location - } + if x, ok := x.GetPayload().(*Object_StreamLocation_Location); ok { + return x.Location } return nil } func (x *Object_StreamLocation) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Object_StreamLocation_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Object_StreamLocation_Status); ok { + return x.Status } return nil } @@ -3481,15 +3573,16 @@ func (*Object_StreamLocation_Status) isObject_StreamLocation_Payload() {} // Represent multiple vector locations. 
type Object_Locations struct { - state protoimpl.MessageState `protogen:"open.v1"` - Locations []*Object_Location ` protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Locations []*Object_Location `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty"` } func (x *Object_Locations) Reset() { *x = Object_Locations{} - mi := &file_v1_payload_payload_proto_msgTypes[62] + mi := &file_v1_payload_payload_proto_msgTypes[63] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3501,7 +3594,7 @@ func (x *Object_Locations) String() string { func (*Object_Locations) ProtoMessage() {} func (x *Object_Locations) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[62] + mi := &file_v1_payload_payload_proto_msgTypes[63] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3526,14 +3619,14 @@ func (x *Object_Locations) GetLocations() []*Object_Location { // Represent the list object vector stream request and response. type Object_List struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Object_List) Reset() { *x = Object_List{} - mi := &file_v1_payload_payload_proto_msgTypes[63] + mi := &file_v1_payload_payload_proto_msgTypes[64] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3545,7 +3638,7 @@ func (x *Object_List) String() string { func (*Object_List) ProtoMessage() {} func (x *Object_List) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[63] + mi := &file_v1_payload_payload_proto_msgTypes[64] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3562,14 +3655,14 @@ func (*Object_List) Descriptor() ([]byte, []int) { } type Object_List_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Object_List_Request) Reset() { *x = Object_List_Request{} - mi := &file_v1_payload_payload_proto_msgTypes[64] + mi := &file_v1_payload_payload_proto_msgTypes[65] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3581,7 +3674,7 @@ func (x *Object_List_Request) String() string { func (*Object_List_Request) ProtoMessage() {} func (x *Object_List_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[64] + mi := &file_v1_payload_payload_proto_msgTypes[65] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3598,19 +3691,20 @@ func (*Object_List_Request) Descriptor() ([]byte, []int) { } type Object_List_Response struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Types that are valid to be assigned to Payload: + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Payload: // // *Object_List_Response_Vector // *Object_List_Response_Status - Payload isObject_List_Response_Payload ` protobuf_oneof:"payload"` - unknownFields protoimpl.UnknownFields - sizeCache 
protoimpl.SizeCache + Payload isObject_List_Response_Payload `protobuf_oneof:"payload"` } func (x *Object_List_Response) Reset() { *x = Object_List_Response{} - mi := &file_v1_payload_payload_proto_msgTypes[65] + mi := &file_v1_payload_payload_proto_msgTypes[66] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3622,7 +3716,7 @@ func (x *Object_List_Response) String() string { func (*Object_List_Response) ProtoMessage() {} func (x *Object_List_Response) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[65] + mi := &file_v1_payload_payload_proto_msgTypes[66] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3638,27 +3732,23 @@ func (*Object_List_Response) Descriptor() ([]byte, []int) { return file_v1_payload_payload_proto_rawDescGZIP(), []int{7, 16, 1} } -func (x *Object_List_Response) GetPayload() isObject_List_Response_Payload { - if x != nil { - return x.Payload +func (m *Object_List_Response) GetPayload() isObject_List_Response_Payload { + if m != nil { + return m.Payload } return nil } func (x *Object_List_Response) GetVector() *Object_Vector { - if x != nil { - if x, ok := x.Payload.(*Object_List_Response_Vector); ok { - return x.Vector - } + if x, ok := x.GetPayload().(*Object_List_Response_Vector); ok { + return x.Vector } return nil } func (x *Object_List_Response) GetStatus() *status.Status { - if x != nil { - if x, ok := x.Payload.(*Object_List_Response_Status); ok { - return x.Status - } + if x, ok := x.GetPayload().(*Object_List_Response_Status); ok { + return x.Status } return nil } @@ -3683,16 +3773,17 @@ func (*Object_List_Response_Status) isObject_List_Response_Payload() {} // Represent the create index request. type Control_CreateIndexRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The pool size of the create index operation. - PoolSize uint32 ` protobuf:"varint,1,opt,name=pool_size,json=poolSize,proto3" json:"pool_size,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The pool size of the create index operation. + PoolSize uint32 `protobuf:"varint,1,opt,name=pool_size,json=poolSize,proto3" json:"pool_size,omitempty"` } func (x *Control_CreateIndexRequest) Reset() { *x = Control_CreateIndexRequest{} - mi := &file_v1_payload_payload_proto_msgTypes[66] + mi := &file_v1_payload_payload_proto_msgTypes[67] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3704,7 +3795,7 @@ func (x *Control_CreateIndexRequest) String() string { func (*Control_CreateIndexRequest) ProtoMessage() {} func (x *Control_CreateIndexRequest) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[66] + mi := &file_v1_payload_payload_proto_msgTypes[67] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3729,20 +3820,21 @@ func (x *Control_CreateIndexRequest) GetPoolSize() uint32 { // Represent the dicoverer request. type Discoverer_Request struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The agent name to be discovered. - Name string ` protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The namespace to be discovered. 
- Namespace string ` protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` // The node to be discovered. - Node string ` protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Node string `protobuf:"bytes,3,opt,name=node,proto3" json:"node,omitempty"` } func (x *Discoverer_Request) Reset() { *x = Discoverer_Request{} - mi := &file_v1_payload_payload_proto_msgTypes[67] + mi := &file_v1_payload_payload_proto_msgTypes[68] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3754,7 +3846,7 @@ func (x *Discoverer_Request) String() string { func (*Discoverer_Request) ProtoMessage() {} func (x *Discoverer_Request) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[67] + mi := &file_v1_payload_payload_proto_msgTypes[68] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3793,14 +3885,14 @@ func (x *Discoverer_Request) GetNode() string { // Represent the index information messages. type Info_Index struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Info_Index) Reset() { *x = Info_Index{} - mi := &file_v1_payload_payload_proto_msgTypes[68] + mi := &file_v1_payload_payload_proto_msgTypes[69] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3812,7 +3904,7 @@ func (x *Info_Index) String() string { func (*Info_Index) ProtoMessage() {} func (x *Info_Index) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[68] + mi := &file_v1_payload_payload_proto_msgTypes[69] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3830,28 +3922,29 @@ func (*Info_Index) Descriptor() ([]byte, []int) { // Represent the pod information message. type Info_Pod struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The app name of the pod on the label. - AppName string ` protobuf:"bytes,1,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` + AppName string `protobuf:"bytes,1,opt,name=app_name,json=appName,proto3" json:"app_name,omitempty"` // The name of the pod. - Name string ` protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // The namespace of the pod. - Namespace string ` protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace,proto3" json:"namespace,omitempty"` // The IP of the pod. - Ip string ` protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` + Ip string `protobuf:"bytes,4,opt,name=ip,proto3" json:"ip,omitempty"` // The CPU information of the pod. - Cpu *Info_CPU ` protobuf:"bytes,5,opt,name=cpu,proto3" json:"cpu,omitempty"` + Cpu *Info_CPU `protobuf:"bytes,5,opt,name=cpu,proto3" json:"cpu,omitempty"` // The memory information of the pod. 
- Memory *Info_Memory ` protobuf:"bytes,6,opt,name=memory,proto3" json:"memory,omitempty"` + Memory *Info_Memory `protobuf:"bytes,6,opt,name=memory,proto3" json:"memory,omitempty"` // The node information of the pod. - Node *Info_Node ` protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Node *Info_Node `protobuf:"bytes,7,opt,name=node,proto3" json:"node,omitempty"` } func (x *Info_Pod) Reset() { *x = Info_Pod{} - mi := &file_v1_payload_payload_proto_msgTypes[69] + mi := &file_v1_payload_payload_proto_msgTypes[70] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3863,7 +3956,7 @@ func (x *Info_Pod) String() string { func (*Info_Pod) ProtoMessage() {} func (x *Info_Pod) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[69] + mi := &file_v1_payload_payload_proto_msgTypes[70] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -3930,26 +4023,27 @@ func (x *Info_Pod) GetNode() *Info_Node { // Represent the node information message. type Info_Node struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the node. - Name string ` protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The internal IP address of the node. - InternalAddr string ` protobuf:"bytes,2,opt,name=internal_addr,json=internalAddr,proto3" json:"internal_addr,omitempty"` + InternalAddr string `protobuf:"bytes,2,opt,name=internal_addr,json=internalAddr,proto3" json:"internal_addr,omitempty"` // The external IP address of the node. - ExternalAddr string ` protobuf:"bytes,3,opt,name=external_addr,json=externalAddr,proto3" json:"external_addr,omitempty"` + ExternalAddr string `protobuf:"bytes,3,opt,name=external_addr,json=externalAddr,proto3" json:"external_addr,omitempty"` // The CPU information of the node. - Cpu *Info_CPU ` protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` + Cpu *Info_CPU `protobuf:"bytes,4,opt,name=cpu,proto3" json:"cpu,omitempty"` // The memory information of the node. - Memory *Info_Memory ` protobuf:"bytes,5,opt,name=memory,proto3" json:"memory,omitempty"` + Memory *Info_Memory `protobuf:"bytes,5,opt,name=memory,proto3" json:"memory,omitempty"` // The pod information of the node. - Pods *Info_Pods ` protobuf:"bytes,6,opt,name=Pods,proto3" json:"Pods,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Pods *Info_Pods `protobuf:"bytes,6,opt,name=Pods,proto3" json:"Pods,omitempty"` } func (x *Info_Node) Reset() { *x = Info_Node{} - mi := &file_v1_payload_payload_proto_msgTypes[70] + mi := &file_v1_payload_payload_proto_msgTypes[71] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -3961,7 +4055,7 @@ func (x *Info_Node) String() string { func (*Info_Node) ProtoMessage() {} func (x *Info_Node) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[70] + mi := &file_v1_payload_payload_proto_msgTypes[71] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4021,26 +4115,27 @@ func (x *Info_Node) GetPods() *Info_Pods { // Represent the service information message. 
type Info_Service struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the svc. - Name string ` protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The cluster ip of the svc. - ClusterIp string ` protobuf:"bytes,2,opt,name=cluster_ip,json=clusterIp,proto3" json:"cluster_ip,omitempty"` + ClusterIp string `protobuf:"bytes,2,opt,name=cluster_ip,json=clusterIp,proto3" json:"cluster_ip,omitempty"` // The cluster ips of the svc. - ClusterIps []string ` protobuf:"bytes,3,rep,name=cluster_ips,json=clusterIps,proto3" json:"cluster_ips,omitempty"` + ClusterIps []string `protobuf:"bytes,3,rep,name=cluster_ips,json=clusterIps,proto3" json:"cluster_ips,omitempty"` // The port of the svc. - Ports []*Info_ServicePort ` protobuf:"bytes,4,rep,name=ports,proto3" json:"ports,omitempty"` + Ports []*Info_ServicePort `protobuf:"bytes,4,rep,name=ports,proto3" json:"ports,omitempty"` // The labels of the service. - Labels *Info_Labels ` protobuf:"bytes,5,opt,name=labels,proto3" json:"labels,omitempty"` + Labels *Info_Labels `protobuf:"bytes,5,opt,name=labels,proto3" json:"labels,omitempty"` // The annotations of the service. - Annotations *Info_Annotations ` protobuf:"bytes,6,opt,name=annotations,proto3" json:"annotations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Annotations *Info_Annotations `protobuf:"bytes,6,opt,name=annotations,proto3" json:"annotations,omitempty"` } func (x *Info_Service) Reset() { *x = Info_Service{} - mi := &file_v1_payload_payload_proto_msgTypes[71] + mi := &file_v1_payload_payload_proto_msgTypes[72] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4052,7 +4147,7 @@ func (x *Info_Service) String() string { func (*Info_Service) ProtoMessage() {} func (x *Info_Service) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[71] + mi := &file_v1_payload_payload_proto_msgTypes[72] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4112,18 +4207,19 @@ func (x *Info_Service) GetAnnotations() *Info_Annotations { // Represets the service port information message. type Info_ServicePort struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The name of the port. 
- Name string ` protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // The port number - Port int32 ` protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } func (x *Info_ServicePort) Reset() { *x = Info_ServicePort{} - mi := &file_v1_payload_payload_proto_msgTypes[72] + mi := &file_v1_payload_payload_proto_msgTypes[73] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4135,7 +4231,7 @@ func (x *Info_ServicePort) String() string { func (*Info_ServicePort) ProtoMessage() {} func (x *Info_ServicePort) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[72] + mi := &file_v1_payload_payload_proto_msgTypes[73] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4167,15 +4263,16 @@ func (x *Info_ServicePort) GetPort() int32 { // Represent the kubernetes labels. type Info_Labels struct { - state protoimpl.MessageState `protogen:"open.v1"` - Labels map[string]string ` protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Labels map[string]string `protobuf:"bytes,1,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Info_Labels) Reset() { *x = Info_Labels{} - mi := &file_v1_payload_payload_proto_msgTypes[73] + mi := &file_v1_payload_payload_proto_msgTypes[74] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4187,7 +4284,7 @@ func (x *Info_Labels) String() string { func (*Info_Labels) ProtoMessage() {} func (x *Info_Labels) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[73] + mi := &file_v1_payload_payload_proto_msgTypes[74] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4212,15 +4309,16 @@ func (x *Info_Labels) GetLabels() map[string]string { // Represent the kubernetes annotations. 
type Info_Annotations struct { - state protoimpl.MessageState `protogen:"open.v1"` - Annotations map[string]string ` protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Annotations map[string]string `protobuf:"bytes,1,rep,name=annotations,proto3" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Info_Annotations) Reset() { *x = Info_Annotations{} - mi := &file_v1_payload_payload_proto_msgTypes[74] + mi := &file_v1_payload_payload_proto_msgTypes[75] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4232,7 +4330,7 @@ func (x *Info_Annotations) String() string { func (*Info_Annotations) ProtoMessage() {} func (x *Info_Annotations) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[74] + mi := &file_v1_payload_payload_proto_msgTypes[75] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4257,20 +4355,21 @@ func (x *Info_Annotations) GetAnnotations() map[string]string { // Represent the CPU information message. type Info_CPU struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The CPU resource limit. - Limit float64 ` protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` + Limit float64 `protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` // The CPU resource requested. - Request float64 ` protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` + Request float64 `protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` // The CPU usage. - Usage float64 ` protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Usage float64 `protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` } func (x *Info_CPU) Reset() { *x = Info_CPU{} - mi := &file_v1_payload_payload_proto_msgTypes[75] + mi := &file_v1_payload_payload_proto_msgTypes[76] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4282,7 +4381,7 @@ func (x *Info_CPU) String() string { func (*Info_CPU) ProtoMessage() {} func (x *Info_CPU) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[75] + mi := &file_v1_payload_payload_proto_msgTypes[76] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4321,20 +4420,21 @@ func (x *Info_CPU) GetUsage() float64 { // Represent the memory information message. type Info_Memory struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The memory limit. - Limit float64 ` protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` + Limit float64 `protobuf:"fixed64,1,opt,name=limit,proto3" json:"limit,omitempty"` // The memory requested. - Request float64 ` protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` + Request float64 `protobuf:"fixed64,2,opt,name=request,proto3" json:"request,omitempty"` // The memory usage. 
- Usage float64 ` protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Usage float64 `protobuf:"fixed64,3,opt,name=usage,proto3" json:"usage,omitempty"` } func (x *Info_Memory) Reset() { *x = Info_Memory{} - mi := &file_v1_payload_payload_proto_msgTypes[76] + mi := &file_v1_payload_payload_proto_msgTypes[77] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4346,7 +4446,7 @@ func (x *Info_Memory) String() string { func (*Info_Memory) ProtoMessage() {} func (x *Info_Memory) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[76] + mi := &file_v1_payload_payload_proto_msgTypes[77] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4385,16 +4485,17 @@ func (x *Info_Memory) GetUsage() float64 { // Represent the multiple pod information message. type Info_Pods struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The multiple pod information. - Pods []*Info_Pod ` protobuf:"bytes,1,rep,name=pods,proto3" json:"pods,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The multiple pod information. + Pods []*Info_Pod `protobuf:"bytes,1,rep,name=pods,proto3" json:"pods,omitempty"` } func (x *Info_Pods) Reset() { *x = Info_Pods{} - mi := &file_v1_payload_payload_proto_msgTypes[77] + mi := &file_v1_payload_payload_proto_msgTypes[78] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4406,7 +4507,7 @@ func (x *Info_Pods) String() string { func (*Info_Pods) ProtoMessage() {} func (x *Info_Pods) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[77] + mi := &file_v1_payload_payload_proto_msgTypes[78] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4431,16 +4532,17 @@ func (x *Info_Pods) GetPods() []*Info_Pod { // Represent the multiple node information message. type Info_Nodes struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The multiple node information. - Nodes []*Info_Node ` protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The multiple node information. + Nodes []*Info_Node `protobuf:"bytes,1,rep,name=nodes,proto3" json:"nodes,omitempty"` } func (x *Info_Nodes) Reset() { *x = Info_Nodes{} - mi := &file_v1_payload_payload_proto_msgTypes[78] + mi := &file_v1_payload_payload_proto_msgTypes[79] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4452,7 +4554,7 @@ func (x *Info_Nodes) String() string { func (*Info_Nodes) ProtoMessage() {} func (x *Info_Nodes) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[78] + mi := &file_v1_payload_payload_proto_msgTypes[79] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4477,16 +4579,17 @@ func (x *Info_Nodes) GetNodes() []*Info_Node { // Represent the multiple service information message. type Info_Services struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The multiple service information. 
- Services []*Info_Service ` protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The multiple service information. + Services []*Info_Service `protobuf:"bytes,1,rep,name=services,proto3" json:"services,omitempty"` } func (x *Info_Services) Reset() { *x = Info_Services{} - mi := &file_v1_payload_payload_proto_msgTypes[79] + mi := &file_v1_payload_payload_proto_msgTypes[80] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4498,7 +4601,7 @@ func (x *Info_Services) String() string { func (*Info_Services) ProtoMessage() {} func (x *Info_Services) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[79] + mi := &file_v1_payload_payload_proto_msgTypes[80] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4523,15 +4626,16 @@ func (x *Info_Services) GetServices() []*Info_Service { // Represent the multiple IP message. type Info_IPs struct { - state protoimpl.MessageState `protogen:"open.v1"` - Ip []string ` protobuf:"bytes,1,rep,name=ip,proto3" json:"ip,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Ip []string `protobuf:"bytes,1,rep,name=ip,proto3" json:"ip,omitempty"` } func (x *Info_IPs) Reset() { *x = Info_IPs{} - mi := &file_v1_payload_payload_proto_msgTypes[80] + mi := &file_v1_payload_payload_proto_msgTypes[81] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4543,7 +4647,7 @@ func (x *Info_IPs) String() string { func (*Info_IPs) ProtoMessage() {} func (x *Info_IPs) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[80] + mi := &file_v1_payload_payload_proto_msgTypes[81] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4568,22 +4672,23 @@ func (x *Info_IPs) GetIp() []string { // Represent the index count message. type Info_Index_Count struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The stored index count. - Stored uint32 ` protobuf:"varint,1,opt,name=stored,proto3" json:"stored,omitempty"` + Stored uint32 `protobuf:"varint,1,opt,name=stored,proto3" json:"stored,omitempty"` // The uncommitted index count. - Uncommitted uint32 ` protobuf:"varint,2,opt,name=uncommitted,proto3" json:"uncommitted,omitempty"` + Uncommitted uint32 `protobuf:"varint,2,opt,name=uncommitted,proto3" json:"uncommitted,omitempty"` // The indexing index count. - Indexing bool ` protobuf:"varint,3,opt,name=indexing,proto3" json:"indexing,omitempty"` + Indexing bool `protobuf:"varint,3,opt,name=indexing,proto3" json:"indexing,omitempty"` // The saving index count. 
- Saving bool ` protobuf:"varint,4,opt,name=saving,proto3" json:"saving,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Saving bool `protobuf:"varint,4,opt,name=saving,proto3" json:"saving,omitempty"` } func (x *Info_Index_Count) Reset() { *x = Info_Index_Count{} - mi := &file_v1_payload_payload_proto_msgTypes[81] + mi := &file_v1_payload_payload_proto_msgTypes[82] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4595,7 +4700,7 @@ func (x *Info_Index_Count) String() string { func (*Info_Index_Count) ProtoMessage() {} func (x *Info_Index_Count) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[81] + mi := &file_v1_payload_payload_proto_msgTypes[82] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4641,20 +4746,21 @@ func (x *Info_Index_Count) GetSaving() bool { // Represent the index count for each Agents message. type Info_Index_Detail struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // count infos for each agents - Counts map[string]*Info_Index_Count ` protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + Counts map[string]*Info_Index_Count `protobuf:"bytes,1,rep,name=counts,proto3" json:"counts,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // index replica of vald cluster - Replica uint32 ` protobuf:"varint,2,opt,name=replica,proto3" json:"replica,omitempty"` + Replica uint32 `protobuf:"varint,2,opt,name=replica,proto3" json:"replica,omitempty"` // live agent replica of vald cluster - LiveAgents uint32 ` protobuf:"varint,3,opt,name=live_agents,json=liveAgents,proto3" json:"live_agents,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + LiveAgents uint32 `protobuf:"varint,3,opt,name=live_agents,json=liveAgents,proto3" json:"live_agents,omitempty"` } func (x *Info_Index_Detail) Reset() { *x = Info_Index_Detail{} - mi := &file_v1_payload_payload_proto_msgTypes[82] + mi := &file_v1_payload_payload_proto_msgTypes[83] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4666,7 +4772,7 @@ func (x *Info_Index_Detail) String() string { func (*Info_Index_Detail) ProtoMessage() {} func (x *Info_Index_Detail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[82] + mi := &file_v1_payload_payload_proto_msgTypes[83] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4705,14 +4811,14 @@ func (x *Info_Index_Detail) GetLiveAgents() uint32 { // Represent the UUID message. 
type Info_Index_UUID struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields } func (x *Info_Index_UUID) Reset() { *x = Info_Index_UUID{} - mi := &file_v1_payload_payload_proto_msgTypes[83] + mi := &file_v1_payload_payload_proto_msgTypes[84] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4724,7 +4830,7 @@ func (x *Info_Index_UUID) String() string { func (*Info_Index_UUID) ProtoMessage() {} func (x *Info_Index_UUID) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[83] + mi := &file_v1_payload_payload_proto_msgTypes[84] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -4742,47 +4848,48 @@ func (*Info_Index_UUID) Descriptor() ([]byte, []int) { // Represents index Statistics type Info_Index_Statistics struct { - state protoimpl.MessageState `protogen:"open.v1"` - Valid bool ` protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` - MedianIndegree int32 ` protobuf:"varint,2,opt,name=median_indegree,json=medianIndegree,proto3" json:"median_indegree,omitempty"` - MedianOutdegree int32 ` protobuf:"varint,3,opt,name=median_outdegree,json=medianOutdegree,proto3" json:"median_outdegree,omitempty"` - MaxNumberOfIndegree uint64 ` protobuf:"varint,4,opt,name=max_number_of_indegree,json=maxNumberOfIndegree,proto3" json:"max_number_of_indegree,omitempty"` - MaxNumberOfOutdegree uint64 ` protobuf:"varint,5,opt,name=max_number_of_outdegree,json=maxNumberOfOutdegree,proto3" json:"max_number_of_outdegree,omitempty"` - MinNumberOfIndegree uint64 ` protobuf:"varint,6,opt,name=min_number_of_indegree,json=minNumberOfIndegree,proto3" json:"min_number_of_indegree,omitempty"` - MinNumberOfOutdegree uint64 ` protobuf:"varint,7,opt,name=min_number_of_outdegree,json=minNumberOfOutdegree,proto3" json:"min_number_of_outdegree,omitempty"` - ModeIndegree uint64 ` protobuf:"varint,8,opt,name=mode_indegree,json=modeIndegree,proto3" json:"mode_indegree,omitempty"` - ModeOutdegree uint64 ` protobuf:"varint,9,opt,name=mode_outdegree,json=modeOutdegree,proto3" json:"mode_outdegree,omitempty"` - NodesSkippedFor10Edges uint64 ` protobuf:"varint,10,opt,name=nodes_skipped_for_10_edges,json=nodesSkippedFor10Edges,proto3" json:"nodes_skipped_for_10_edges,omitempty"` - NodesSkippedForIndegreeDistance uint64 ` protobuf:"varint,11,opt,name=nodes_skipped_for_indegree_distance,json=nodesSkippedForIndegreeDistance,proto3" json:"nodes_skipped_for_indegree_distance,omitempty"` - NumberOfEdges uint64 ` protobuf:"varint,12,opt,name=number_of_edges,json=numberOfEdges,proto3" json:"number_of_edges,omitempty"` - NumberOfIndexedObjects uint64 ` protobuf:"varint,13,opt,name=number_of_indexed_objects,json=numberOfIndexedObjects,proto3" json:"number_of_indexed_objects,omitempty"` - NumberOfNodes uint64 ` protobuf:"varint,14,opt,name=number_of_nodes,json=numberOfNodes,proto3" json:"number_of_nodes,omitempty"` - NumberOfNodesWithoutEdges uint64 ` protobuf:"varint,15,opt,name=number_of_nodes_without_edges,json=numberOfNodesWithoutEdges,proto3" json:"number_of_nodes_without_edges,omitempty"` - NumberOfNodesWithoutIndegree uint64 ` protobuf:"varint,16,opt,name=number_of_nodes_without_indegree,json=numberOfNodesWithoutIndegree,proto3" json:"number_of_nodes_without_indegree,omitempty"` - NumberOfObjects uint64 ` 
protobuf:"varint,17,opt,name=number_of_objects,json=numberOfObjects,proto3" json:"number_of_objects,omitempty"` - NumberOfRemovedObjects uint64 ` protobuf:"varint,18,opt,name=number_of_removed_objects,json=numberOfRemovedObjects,proto3" json:"number_of_removed_objects,omitempty"` - SizeOfObjectRepository uint64 ` protobuf:"varint,19,opt,name=size_of_object_repository,json=sizeOfObjectRepository,proto3" json:"size_of_object_repository,omitempty"` - SizeOfRefinementObjectRepository uint64 ` protobuf:"varint,20,opt,name=size_of_refinement_object_repository,json=sizeOfRefinementObjectRepository,proto3" json:"size_of_refinement_object_repository,omitempty"` - VarianceOfIndegree float64 ` protobuf:"fixed64,21,opt,name=variance_of_indegree,json=varianceOfIndegree,proto3" json:"variance_of_indegree,omitempty"` - VarianceOfOutdegree float64 ` protobuf:"fixed64,22,opt,name=variance_of_outdegree,json=varianceOfOutdegree,proto3" json:"variance_of_outdegree,omitempty"` - MeanEdgeLength float64 ` protobuf:"fixed64,23,opt,name=mean_edge_length,json=meanEdgeLength,proto3" json:"mean_edge_length,omitempty"` - MeanEdgeLengthFor10Edges float64 ` protobuf:"fixed64,24,opt,name=mean_edge_length_for_10_edges,json=meanEdgeLengthFor10Edges,proto3" json:"mean_edge_length_for_10_edges,omitempty"` - MeanIndegreeDistanceFor10Edges float64 ` protobuf:"fixed64,25,opt,name=mean_indegree_distance_for_10_edges,json=meanIndegreeDistanceFor10Edges,proto3" json:"mean_indegree_distance_for_10_edges,omitempty"` - MeanNumberOfEdgesPerNode float64 ` protobuf:"fixed64,26,opt,name=mean_number_of_edges_per_node,json=meanNumberOfEdgesPerNode,proto3" json:"mean_number_of_edges_per_node,omitempty"` - C1Indegree float64 ` protobuf:"fixed64,27,opt,name=c1_indegree,json=c1Indegree,proto3" json:"c1_indegree,omitempty"` - C5Indegree float64 ` protobuf:"fixed64,28,opt,name=c5_indegree,json=c5Indegree,proto3" json:"c5_indegree,omitempty"` - C95Outdegree float64 ` protobuf:"fixed64,29,opt,name=c95_outdegree,json=c95Outdegree,proto3" json:"c95_outdegree,omitempty"` - C99Outdegree float64 ` protobuf:"fixed64,30,opt,name=c99_outdegree,json=c99Outdegree,proto3" json:"c99_outdegree,omitempty"` - IndegreeCount []int64 ` protobuf:"varint,31,rep,packed,name=indegree_count,json=indegreeCount,proto3" json:"indegree_count,omitempty"` - OutdegreeHistogram []uint64 ` protobuf:"varint,32,rep,packed,name=outdegree_histogram,json=outdegreeHistogram,proto3" json:"outdegree_histogram,omitempty"` - IndegreeHistogram []uint64 ` protobuf:"varint,33,rep,packed,name=indegree_histogram,json=indegreeHistogram,proto3" json:"indegree_histogram,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Valid bool `protobuf:"varint,1,opt,name=valid,proto3" json:"valid,omitempty"` + MedianIndegree int32 `protobuf:"varint,2,opt,name=median_indegree,json=medianIndegree,proto3" json:"median_indegree,omitempty"` + MedianOutdegree int32 `protobuf:"varint,3,opt,name=median_outdegree,json=medianOutdegree,proto3" json:"median_outdegree,omitempty"` + MaxNumberOfIndegree uint64 `protobuf:"varint,4,opt,name=max_number_of_indegree,json=maxNumberOfIndegree,proto3" json:"max_number_of_indegree,omitempty"` + MaxNumberOfOutdegree uint64 `protobuf:"varint,5,opt,name=max_number_of_outdegree,json=maxNumberOfOutdegree,proto3" json:"max_number_of_outdegree,omitempty"` + MinNumberOfIndegree uint64 
`protobuf:"varint,6,opt,name=min_number_of_indegree,json=minNumberOfIndegree,proto3" json:"min_number_of_indegree,omitempty"` + MinNumberOfOutdegree uint64 `protobuf:"varint,7,opt,name=min_number_of_outdegree,json=minNumberOfOutdegree,proto3" json:"min_number_of_outdegree,omitempty"` + ModeIndegree uint64 `protobuf:"varint,8,opt,name=mode_indegree,json=modeIndegree,proto3" json:"mode_indegree,omitempty"` + ModeOutdegree uint64 `protobuf:"varint,9,opt,name=mode_outdegree,json=modeOutdegree,proto3" json:"mode_outdegree,omitempty"` + NodesSkippedFor10Edges uint64 `protobuf:"varint,10,opt,name=nodes_skipped_for_10_edges,json=nodesSkippedFor10Edges,proto3" json:"nodes_skipped_for_10_edges,omitempty"` + NodesSkippedForIndegreeDistance uint64 `protobuf:"varint,11,opt,name=nodes_skipped_for_indegree_distance,json=nodesSkippedForIndegreeDistance,proto3" json:"nodes_skipped_for_indegree_distance,omitempty"` + NumberOfEdges uint64 `protobuf:"varint,12,opt,name=number_of_edges,json=numberOfEdges,proto3" json:"number_of_edges,omitempty"` + NumberOfIndexedObjects uint64 `protobuf:"varint,13,opt,name=number_of_indexed_objects,json=numberOfIndexedObjects,proto3" json:"number_of_indexed_objects,omitempty"` + NumberOfNodes uint64 `protobuf:"varint,14,opt,name=number_of_nodes,json=numberOfNodes,proto3" json:"number_of_nodes,omitempty"` + NumberOfNodesWithoutEdges uint64 `protobuf:"varint,15,opt,name=number_of_nodes_without_edges,json=numberOfNodesWithoutEdges,proto3" json:"number_of_nodes_without_edges,omitempty"` + NumberOfNodesWithoutIndegree uint64 `protobuf:"varint,16,opt,name=number_of_nodes_without_indegree,json=numberOfNodesWithoutIndegree,proto3" json:"number_of_nodes_without_indegree,omitempty"` + NumberOfObjects uint64 `protobuf:"varint,17,opt,name=number_of_objects,json=numberOfObjects,proto3" json:"number_of_objects,omitempty"` + NumberOfRemovedObjects uint64 `protobuf:"varint,18,opt,name=number_of_removed_objects,json=numberOfRemovedObjects,proto3" json:"number_of_removed_objects,omitempty"` + SizeOfObjectRepository uint64 `protobuf:"varint,19,opt,name=size_of_object_repository,json=sizeOfObjectRepository,proto3" json:"size_of_object_repository,omitempty"` + SizeOfRefinementObjectRepository uint64 `protobuf:"varint,20,opt,name=size_of_refinement_object_repository,json=sizeOfRefinementObjectRepository,proto3" json:"size_of_refinement_object_repository,omitempty"` + VarianceOfIndegree float64 `protobuf:"fixed64,21,opt,name=variance_of_indegree,json=varianceOfIndegree,proto3" json:"variance_of_indegree,omitempty"` + VarianceOfOutdegree float64 `protobuf:"fixed64,22,opt,name=variance_of_outdegree,json=varianceOfOutdegree,proto3" json:"variance_of_outdegree,omitempty"` + MeanEdgeLength float64 `protobuf:"fixed64,23,opt,name=mean_edge_length,json=meanEdgeLength,proto3" json:"mean_edge_length,omitempty"` + MeanEdgeLengthFor10Edges float64 `protobuf:"fixed64,24,opt,name=mean_edge_length_for_10_edges,json=meanEdgeLengthFor10Edges,proto3" json:"mean_edge_length_for_10_edges,omitempty"` + MeanIndegreeDistanceFor10Edges float64 `protobuf:"fixed64,25,opt,name=mean_indegree_distance_for_10_edges,json=meanIndegreeDistanceFor10Edges,proto3" json:"mean_indegree_distance_for_10_edges,omitempty"` + MeanNumberOfEdgesPerNode float64 `protobuf:"fixed64,26,opt,name=mean_number_of_edges_per_node,json=meanNumberOfEdgesPerNode,proto3" json:"mean_number_of_edges_per_node,omitempty"` + C1Indegree float64 `protobuf:"fixed64,27,opt,name=c1_indegree,json=c1Indegree,proto3" json:"c1_indegree,omitempty"` + C5Indegree float64 
`protobuf:"fixed64,28,opt,name=c5_indegree,json=c5Indegree,proto3" json:"c5_indegree,omitempty"` + C95Outdegree float64 `protobuf:"fixed64,29,opt,name=c95_outdegree,json=c95Outdegree,proto3" json:"c95_outdegree,omitempty"` + C99Outdegree float64 `protobuf:"fixed64,30,opt,name=c99_outdegree,json=c99Outdegree,proto3" json:"c99_outdegree,omitempty"` + IndegreeCount []int64 `protobuf:"varint,31,rep,packed,name=indegree_count,json=indegreeCount,proto3" json:"indegree_count,omitempty"` + OutdegreeHistogram []uint64 `protobuf:"varint,32,rep,packed,name=outdegree_histogram,json=outdegreeHistogram,proto3" json:"outdegree_histogram,omitempty"` + IndegreeHistogram []uint64 `protobuf:"varint,33,rep,packed,name=indegree_histogram,json=indegreeHistogram,proto3" json:"indegree_histogram,omitempty"` } func (x *Info_Index_Statistics) Reset() { *x = Info_Index_Statistics{} - mi := &file_v1_payload_payload_proto_msgTypes[84] + mi := &file_v1_payload_payload_proto_msgTypes[85] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -4794,7 +4901,7 @@ func (x *Info_Index_Statistics) String() string { func (*Info_Index_Statistics) ProtoMessage() {} func (x *Info_Index_Statistics) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[84] + mi := &file_v1_payload_payload_proto_msgTypes[85] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5043,16 +5150,17 @@ func (x *Info_Index_Statistics) GetIndegreeHistogram() []uint64 { // Represents index Statistics for each Agents type Info_Index_StatisticsDetail struct { - state protoimpl.MessageState `protogen:"open.v1"` - // count infos for each agents - Details map[string]*Info_Index_Statistics ` protobuf:"bytes,1,rep,name=details,proto3" json:"details,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // count infos for each agents + Details map[string]*Info_Index_Statistics `protobuf:"bytes,1,rep,name=details,proto3" json:"details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Info_Index_StatisticsDetail) Reset() { *x = Info_Index_StatisticsDetail{} - mi := &file_v1_payload_payload_proto_msgTypes[85] + mi := &file_v1_payload_payload_proto_msgTypes[86] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5064,7 +5172,7 @@ func (x *Info_Index_StatisticsDetail) String() string { func (*Info_Index_StatisticsDetail) ProtoMessage() {} func (x *Info_Index_StatisticsDetail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[85] + mi := &file_v1_payload_payload_proto_msgTypes[86] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5089,48 +5197,49 @@ func (x *Info_Index_StatisticsDetail) GetDetails() map[string]*Info_Index_Statis // Represents index Property type Info_Index_Property struct { - state protoimpl.MessageState `protogen:"open.v1"` - Dimension int32 ` protobuf:"varint,1,opt,name=dimension,proto3" json:"dimension,omitempty"` - ThreadPoolSize int32 ` protobuf:"varint,2,opt,name=thread_pool_size,json=threadPoolSize,proto3" json:"thread_pool_size,omitempty"` - ObjectType string ` protobuf:"bytes,3,opt,name=object_type,json=objectType,proto3" json:"object_type,omitempty"` - DistanceType string ` 
protobuf:"bytes,4,opt,name=distance_type,json=distanceType,proto3" json:"distance_type,omitempty"` - IndexType string ` protobuf:"bytes,5,opt,name=index_type,json=indexType,proto3" json:"index_type,omitempty"` - DatabaseType string ` protobuf:"bytes,6,opt,name=database_type,json=databaseType,proto3" json:"database_type,omitempty"` - ObjectAlignment string ` protobuf:"bytes,7,opt,name=object_alignment,json=objectAlignment,proto3" json:"object_alignment,omitempty"` - PathAdjustmentInterval int32 ` protobuf:"varint,8,opt,name=path_adjustment_interval,json=pathAdjustmentInterval,proto3" json:"path_adjustment_interval,omitempty"` - GraphSharedMemorySize int32 ` protobuf:"varint,9,opt,name=graph_shared_memory_size,json=graphSharedMemorySize,proto3" json:"graph_shared_memory_size,omitempty"` - TreeSharedMemorySize int32 ` protobuf:"varint,10,opt,name=tree_shared_memory_size,json=treeSharedMemorySize,proto3" json:"tree_shared_memory_size,omitempty"` - ObjectSharedMemorySize int32 ` protobuf:"varint,11,opt,name=object_shared_memory_size,json=objectSharedMemorySize,proto3" json:"object_shared_memory_size,omitempty"` - PrefetchOffset int32 ` protobuf:"varint,12,opt,name=prefetch_offset,json=prefetchOffset,proto3" json:"prefetch_offset,omitempty"` - PrefetchSize int32 ` protobuf:"varint,13,opt,name=prefetch_size,json=prefetchSize,proto3" json:"prefetch_size,omitempty"` - AccuracyTable string ` protobuf:"bytes,14,opt,name=accuracy_table,json=accuracyTable,proto3" json:"accuracy_table,omitempty"` - SearchType string ` protobuf:"bytes,15,opt,name=search_type,json=searchType,proto3" json:"search_type,omitempty"` - MaxMagnitude float32 ` protobuf:"fixed32,16,opt,name=max_magnitude,json=maxMagnitude,proto3" json:"max_magnitude,omitempty"` - NOfNeighborsForInsertionOrder int32 ` protobuf:"varint,17,opt,name=n_of_neighbors_for_insertion_order,json=nOfNeighborsForInsertionOrder,proto3" json:"n_of_neighbors_for_insertion_order,omitempty"` - EpsilonForInsertionOrder float32 ` protobuf:"fixed32,18,opt,name=epsilon_for_insertion_order,json=epsilonForInsertionOrder,proto3" json:"epsilon_for_insertion_order,omitempty"` - RefinementObjectType string ` protobuf:"bytes,19,opt,name=refinement_object_type,json=refinementObjectType,proto3" json:"refinement_object_type,omitempty"` - TruncationThreshold int32 ` protobuf:"varint,20,opt,name=truncation_threshold,json=truncationThreshold,proto3" json:"truncation_threshold,omitempty"` - EdgeSizeForCreation int32 ` protobuf:"varint,21,opt,name=edge_size_for_creation,json=edgeSizeForCreation,proto3" json:"edge_size_for_creation,omitempty"` - EdgeSizeForSearch int32 ` protobuf:"varint,22,opt,name=edge_size_for_search,json=edgeSizeForSearch,proto3" json:"edge_size_for_search,omitempty"` - EdgeSizeLimitForCreation int32 ` protobuf:"varint,23,opt,name=edge_size_limit_for_creation,json=edgeSizeLimitForCreation,proto3" json:"edge_size_limit_for_creation,omitempty"` - InsertionRadiusCoefficient float64 ` protobuf:"fixed64,24,opt,name=insertion_radius_coefficient,json=insertionRadiusCoefficient,proto3" json:"insertion_radius_coefficient,omitempty"` - SeedSize int32 ` protobuf:"varint,25,opt,name=seed_size,json=seedSize,proto3" json:"seed_size,omitempty"` - SeedType string ` protobuf:"bytes,26,opt,name=seed_type,json=seedType,proto3" json:"seed_type,omitempty"` - TruncationThreadPoolSize int32 ` protobuf:"varint,27,opt,name=truncation_thread_pool_size,json=truncationThreadPoolSize,proto3" json:"truncation_thread_pool_size,omitempty"` - BatchSizeForCreation int32 ` 
protobuf:"varint,28,opt,name=batch_size_for_creation,json=batchSizeForCreation,proto3" json:"batch_size_for_creation,omitempty"` - GraphType string ` protobuf:"bytes,29,opt,name=graph_type,json=graphType,proto3" json:"graph_type,omitempty"` - DynamicEdgeSizeBase int32 ` protobuf:"varint,30,opt,name=dynamic_edge_size_base,json=dynamicEdgeSizeBase,proto3" json:"dynamic_edge_size_base,omitempty"` - DynamicEdgeSizeRate int32 ` protobuf:"varint,31,opt,name=dynamic_edge_size_rate,json=dynamicEdgeSizeRate,proto3" json:"dynamic_edge_size_rate,omitempty"` - BuildTimeLimit float32 ` protobuf:"fixed32,32,opt,name=build_time_limit,json=buildTimeLimit,proto3" json:"build_time_limit,omitempty"` - OutgoingEdge int32 ` protobuf:"varint,33,opt,name=outgoing_edge,json=outgoingEdge,proto3" json:"outgoing_edge,omitempty"` - IncomingEdge int32 ` protobuf:"varint,34,opt,name=incoming_edge,json=incomingEdge,proto3" json:"incoming_edge,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Dimension int32 `protobuf:"varint,1,opt,name=dimension,proto3" json:"dimension,omitempty"` + ThreadPoolSize int32 `protobuf:"varint,2,opt,name=thread_pool_size,json=threadPoolSize,proto3" json:"thread_pool_size,omitempty"` + ObjectType string `protobuf:"bytes,3,opt,name=object_type,json=objectType,proto3" json:"object_type,omitempty"` + DistanceType string `protobuf:"bytes,4,opt,name=distance_type,json=distanceType,proto3" json:"distance_type,omitempty"` + IndexType string `protobuf:"bytes,5,opt,name=index_type,json=indexType,proto3" json:"index_type,omitempty"` + DatabaseType string `protobuf:"bytes,6,opt,name=database_type,json=databaseType,proto3" json:"database_type,omitempty"` + ObjectAlignment string `protobuf:"bytes,7,opt,name=object_alignment,json=objectAlignment,proto3" json:"object_alignment,omitempty"` + PathAdjustmentInterval int32 `protobuf:"varint,8,opt,name=path_adjustment_interval,json=pathAdjustmentInterval,proto3" json:"path_adjustment_interval,omitempty"` + GraphSharedMemorySize int32 `protobuf:"varint,9,opt,name=graph_shared_memory_size,json=graphSharedMemorySize,proto3" json:"graph_shared_memory_size,omitempty"` + TreeSharedMemorySize int32 `protobuf:"varint,10,opt,name=tree_shared_memory_size,json=treeSharedMemorySize,proto3" json:"tree_shared_memory_size,omitempty"` + ObjectSharedMemorySize int32 `protobuf:"varint,11,opt,name=object_shared_memory_size,json=objectSharedMemorySize,proto3" json:"object_shared_memory_size,omitempty"` + PrefetchOffset int32 `protobuf:"varint,12,opt,name=prefetch_offset,json=prefetchOffset,proto3" json:"prefetch_offset,omitempty"` + PrefetchSize int32 `protobuf:"varint,13,opt,name=prefetch_size,json=prefetchSize,proto3" json:"prefetch_size,omitempty"` + AccuracyTable string `protobuf:"bytes,14,opt,name=accuracy_table,json=accuracyTable,proto3" json:"accuracy_table,omitempty"` + SearchType string `protobuf:"bytes,15,opt,name=search_type,json=searchType,proto3" json:"search_type,omitempty"` + MaxMagnitude float32 `protobuf:"fixed32,16,opt,name=max_magnitude,json=maxMagnitude,proto3" json:"max_magnitude,omitempty"` + NOfNeighborsForInsertionOrder int32 `protobuf:"varint,17,opt,name=n_of_neighbors_for_insertion_order,json=nOfNeighborsForInsertionOrder,proto3" json:"n_of_neighbors_for_insertion_order,omitempty"` + EpsilonForInsertionOrder float32 `protobuf:"fixed32,18,opt,name=epsilon_for_insertion_order,json=epsilonForInsertionOrder,proto3" 
json:"epsilon_for_insertion_order,omitempty"` + RefinementObjectType string `protobuf:"bytes,19,opt,name=refinement_object_type,json=refinementObjectType,proto3" json:"refinement_object_type,omitempty"` + TruncationThreshold int32 `protobuf:"varint,20,opt,name=truncation_threshold,json=truncationThreshold,proto3" json:"truncation_threshold,omitempty"` + EdgeSizeForCreation int32 `protobuf:"varint,21,opt,name=edge_size_for_creation,json=edgeSizeForCreation,proto3" json:"edge_size_for_creation,omitempty"` + EdgeSizeForSearch int32 `protobuf:"varint,22,opt,name=edge_size_for_search,json=edgeSizeForSearch,proto3" json:"edge_size_for_search,omitempty"` + EdgeSizeLimitForCreation int32 `protobuf:"varint,23,opt,name=edge_size_limit_for_creation,json=edgeSizeLimitForCreation,proto3" json:"edge_size_limit_for_creation,omitempty"` + InsertionRadiusCoefficient float64 `protobuf:"fixed64,24,opt,name=insertion_radius_coefficient,json=insertionRadiusCoefficient,proto3" json:"insertion_radius_coefficient,omitempty"` + SeedSize int32 `protobuf:"varint,25,opt,name=seed_size,json=seedSize,proto3" json:"seed_size,omitempty"` + SeedType string `protobuf:"bytes,26,opt,name=seed_type,json=seedType,proto3" json:"seed_type,omitempty"` + TruncationThreadPoolSize int32 `protobuf:"varint,27,opt,name=truncation_thread_pool_size,json=truncationThreadPoolSize,proto3" json:"truncation_thread_pool_size,omitempty"` + BatchSizeForCreation int32 `protobuf:"varint,28,opt,name=batch_size_for_creation,json=batchSizeForCreation,proto3" json:"batch_size_for_creation,omitempty"` + GraphType string `protobuf:"bytes,29,opt,name=graph_type,json=graphType,proto3" json:"graph_type,omitempty"` + DynamicEdgeSizeBase int32 `protobuf:"varint,30,opt,name=dynamic_edge_size_base,json=dynamicEdgeSizeBase,proto3" json:"dynamic_edge_size_base,omitempty"` + DynamicEdgeSizeRate int32 `protobuf:"varint,31,opt,name=dynamic_edge_size_rate,json=dynamicEdgeSizeRate,proto3" json:"dynamic_edge_size_rate,omitempty"` + BuildTimeLimit float32 `protobuf:"fixed32,32,opt,name=build_time_limit,json=buildTimeLimit,proto3" json:"build_time_limit,omitempty"` + OutgoingEdge int32 `protobuf:"varint,33,opt,name=outgoing_edge,json=outgoingEdge,proto3" json:"outgoing_edge,omitempty"` + IncomingEdge int32 `protobuf:"varint,34,opt,name=incoming_edge,json=incomingEdge,proto3" json:"incoming_edge,omitempty"` } func (x *Info_Index_Property) Reset() { *x = Info_Index_Property{} - mi := &file_v1_payload_payload_proto_msgTypes[86] + mi := &file_v1_payload_payload_proto_msgTypes[87] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5142,7 +5251,7 @@ func (x *Info_Index_Property) String() string { func (*Info_Index_Property) ProtoMessage() {} func (x *Info_Index_Property) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[86] + mi := &file_v1_payload_payload_proto_msgTypes[87] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5398,15 +5507,16 @@ func (x *Info_Index_Property) GetIncomingEdge() int32 { // Represents index Properties for each Agents type Info_Index_PropertyDetail struct { - state protoimpl.MessageState `protogen:"open.v1"` - Details map[string]*Info_Index_Property ` protobuf:"bytes,1,rep,name=details,proto3" json:"details,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields 
protoimpl.UnknownFields + + Details map[string]*Info_Index_Property `protobuf:"bytes,1,rep,name=details,proto3" json:"details,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *Info_Index_PropertyDetail) Reset() { *x = Info_Index_PropertyDetail{} - mi := &file_v1_payload_payload_proto_msgTypes[87] + mi := &file_v1_payload_payload_proto_msgTypes[88] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5418,7 +5528,7 @@ func (x *Info_Index_PropertyDetail) String() string { func (*Info_Index_PropertyDetail) ProtoMessage() {} func (x *Info_Index_PropertyDetail) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[87] + mi := &file_v1_payload_payload_proto_msgTypes[88] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5443,15 +5553,16 @@ func (x *Info_Index_PropertyDetail) GetDetails() map[string]*Info_Index_Property // The committed UUID. type Info_Index_UUID_Committed struct { - state protoimpl.MessageState `protogen:"open.v1"` - Uuid string ` protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` } func (x *Info_Index_UUID_Committed) Reset() { *x = Info_Index_UUID_Committed{} - mi := &file_v1_payload_payload_proto_msgTypes[89] + mi := &file_v1_payload_payload_proto_msgTypes[90] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5463,7 +5574,7 @@ func (x *Info_Index_UUID_Committed) String() string { func (*Info_Index_UUID_Committed) ProtoMessage() {} func (x *Info_Index_UUID_Committed) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[89] + mi := &file_v1_payload_payload_proto_msgTypes[90] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5488,15 +5599,16 @@ func (x *Info_Index_UUID_Committed) GetUuid() string { // The uncommitted UUID. type Info_Index_UUID_Uncommitted struct { - state protoimpl.MessageState `protogen:"open.v1"` - Uuid string ` protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Uuid string `protobuf:"bytes,1,opt,name=uuid,proto3" json:"uuid,omitempty"` } func (x *Info_Index_UUID_Uncommitted) Reset() { *x = Info_Index_UUID_Uncommitted{} - mi := &file_v1_payload_payload_proto_msgTypes[90] + mi := &file_v1_payload_payload_proto_msgTypes[91] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5508,7 +5620,7 @@ func (x *Info_Index_UUID_Uncommitted) String() string { func (*Info_Index_UUID_Uncommitted) ProtoMessage() {} func (x *Info_Index_UUID_Uncommitted) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[90] + mi := &file_v1_payload_payload_proto_msgTypes[91] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5533,18 +5645,19 @@ func (x *Info_Index_UUID_Uncommitted) GetUuid() string { // Represent server information. 
type Mirror_Target struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The target hostname. - Host string ` protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` // The target port. - Port uint32 ` protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Port uint32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` } func (x *Mirror_Target) Reset() { *x = Mirror_Target{} - mi := &file_v1_payload_payload_proto_msgTypes[95] + mi := &file_v1_payload_payload_proto_msgTypes[96] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5556,7 +5669,7 @@ func (x *Mirror_Target) String() string { func (*Mirror_Target) ProtoMessage() {} func (x *Mirror_Target) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[95] + mi := &file_v1_payload_payload_proto_msgTypes[96] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5588,16 +5701,17 @@ func (x *Mirror_Target) GetPort() uint32 { // Represent the multiple Target message. type Mirror_Targets struct { - state protoimpl.MessageState `protogen:"open.v1"` - // The multiple target information. - Targets []*Mirror_Target ` protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The multiple target information. + Targets []*Mirror_Target `protobuf:"bytes,1,rep,name=targets,proto3" json:"targets,omitempty"` } func (x *Mirror_Targets) Reset() { *x = Mirror_Targets{} - mi := &file_v1_payload_payload_proto_msgTypes[96] + mi := &file_v1_payload_payload_proto_msgTypes[97] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5609,7 +5723,7 @@ func (x *Mirror_Targets) String() string { func (*Mirror_Targets) ProtoMessage() {} func (x *Mirror_Targets) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[96] + mi := &file_v1_payload_payload_proto_msgTypes[97] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5633,15 +5747,16 @@ func (x *Mirror_Targets) GetTargets() []*Mirror_Target { } type Meta_Key struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key string ` protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` } func (x *Meta_Key) Reset() { *x = Meta_Key{} - mi := &file_v1_payload_payload_proto_msgTypes[97] + mi := &file_v1_payload_payload_proto_msgTypes[98] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5653,7 +5768,7 @@ func (x *Meta_Key) String() string { func (*Meta_Key) ProtoMessage() {} func (x *Meta_Key) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[97] + mi := &file_v1_payload_payload_proto_msgTypes[98] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5677,15 +5792,16 @@ func (x *Meta_Key) GetKey() string 
{ } type Meta_Value struct { - state protoimpl.MessageState `protogen:"open.v1"` - Value *anypb.Any ` protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *anypb.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` } func (x *Meta_Value) Reset() { *x = Meta_Value{} - mi := &file_v1_payload_payload_proto_msgTypes[98] + mi := &file_v1_payload_payload_proto_msgTypes[99] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5697,7 +5813,7 @@ func (x *Meta_Value) String() string { func (*Meta_Value) ProtoMessage() {} func (x *Meta_Value) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[98] + mi := &file_v1_payload_payload_proto_msgTypes[99] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5721,16 +5837,17 @@ func (x *Meta_Value) GetValue() *anypb.Any { } type Meta_KeyValue struct { - state protoimpl.MessageState `protogen:"open.v1"` - Key *Meta_Key ` protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` - Value *Meta_Value ` protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key *Meta_Key `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *Meta_Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` } func (x *Meta_KeyValue) Reset() { *x = Meta_KeyValue{} - mi := &file_v1_payload_payload_proto_msgTypes[99] + mi := &file_v1_payload_payload_proto_msgTypes[100] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -5742,7 +5859,7 @@ func (x *Meta_KeyValue) String() string { func (*Meta_KeyValue) ProtoMessage() {} func (x *Meta_KeyValue) ProtoReflect() protoreflect.Message { - mi := &file_v1_payload_payload_proto_msgTypes[99] + mi := &file_v1_payload_payload_proto_msgTypes[100] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -5922,7 +6039,7 @@ var file_v1_payload_payload_proto_rawDesc = []byte{ 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x9d, 0x05, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xfe, 0x05, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x1a, 0x79, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, @@ -5951,574 +6068,580 @@ var file_v1_payload_payload_proto_rawDesc = []byte{ 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, - 0x66, 
0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, - 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x9d, 0x05, 0x0a, 0x06, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, - 0x1a, 0x79, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, 0x76, - 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, - 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, - 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, - 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x46, 0x0a, 0x0c, 0x4d, - 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x72, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, - 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x73, 0x1a, 0xae, 0x01, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, 0x06, - 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, - 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, - 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0a, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x69, 0x7a, 0x65, 0x72, 0x1a, 
0x52, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x4f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x08, 0x72, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, - 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, - 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, - 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x74, 0x72, 0x69, 0x63, 0x74, - 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x12, - 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, 0x0a, - 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, - 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x15, - 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, 0x55, - 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x91, 0x04, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x1a, 0x63, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, 0x52, 0x02, - 0x69, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, - 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x46, 0x0a, 0x0c, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x50, 0x0a, - 0x10, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, - 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, 0x1a, - 0xa8, 0x01, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, 0x0a, - 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 
0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x41, 0x0a, 0x08, 0x6f, - 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, 0x3a, - 0x0a, 0x08, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x06, 0x0a, 0x02, 0x45, 0x71, - 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4e, 0x65, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x65, - 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x74, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x65, - 0x10, 0x04, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x74, 0x10, 0x05, 0x1a, 0x5d, 0x0a, 0x06, 0x43, 0x6f, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x5f, 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x17, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x12, 0x14, 0x0a, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x05, 0x66, 0x6f, 0x72, 0x63, 0x65, 0x1a, 0xca, 0x01, 0x0a, 0x06, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x74, 0x72, 0x69, 0x63, - 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x12, 0x0a, 0x05, 0x46, 0x6c, 0x75, - 0x73, 0x68, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb1, 0x0b, - 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x75, 0x0a, 0x0d, 0x56, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x02, 0x69, 0x64, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, 0x42, 0x08, 0xba, 0x48, - 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x02, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x66, 0x69, - 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x2e, - 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x1a, - 0x36, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x64, - 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, 0x64, - 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x08, 
0x64, 0x69, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, + 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, + 0x0a, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x9d, 0x05, 0x0a, 0x06, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x1a, 0x79, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3b, 0x0a, 0x06, + 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, - 0x2e, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x08, 0x64, 0x69, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x1d, - 0x0a, 0x02, 0x49, 0x44, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x17, 0x0a, - 0x03, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x1a, 0x61, 0x0a, 0x06, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, - 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, - 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x06, 0x76, 0x65, 0x63, - 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x02, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, - 0x02, 0x08, 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, - 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x43, 0x0a, 0x10, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, - 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, - 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x42, - 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x17, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 
0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x1a, 0x3e, 0x0a, 0x07, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x33, 0x0a, - 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, - 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x73, 0x1a, 0x7c, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x56, 0x65, 0x63, 0x74, - 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, - 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, - 0x1a, 0x3d, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x56, 0x65, 0x63, 0x74, 0x6f, - 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, 0x61, - 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, 0x73, 0x68, 0x61, 0x70, 0x65, 0x1a, - 0x37, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x74, 0x0a, 0x0a, 0x53, 0x74, 0x72, 0x65, - 0x61, 0x6d, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, - 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x48, 0x00, 0x52, - 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, - 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x44, - 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, - 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, - 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x03, 0x69, 0x70, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x48, 0x00, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x46, 0x0a, 0x09, 0x4c, - 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x6f, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, - 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x1a, 0x09, 0x0a, 0x07, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, + 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, + 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x46, 0x0a, 0x0c, + 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, + 0x72, 0x74, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x73, 0x1a, 0xae, 0x01, 0x0a, 0x0d, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x52, + 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, + 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x66, + 0x69, 0x67, 0x52, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x39, 0x0a, 0x0a, 0x76, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, + 0x65, 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x0a, 0x76, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x69, 0x7a, 0x65, 0x72, 0x1a, 0x52, 0x0a, 0x12, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x4f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x08, 0x72, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x73, 0x65, 0x72, + 0x74, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 
0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0xca, 0x01, 0x0a, 0x06, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x74, 0x72, + 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x74, 0x72, 0x69, 0x63, + 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x33, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x36, + 0x0a, 0x17, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x64, 0x5f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x15, 0x64, 0x69, 0x73, 0x61, 0x62, 0x6c, 0x65, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x64, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x91, 0x04, 0x0a, 0x06, 0x52, 0x65, 0x6d, 0x6f, 0x76, + 0x65, 0x1a, 0x63, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x25, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, 0x52, + 0x02, 0x69, 0x64, 0x12, 0x31, 0x0a, 0x06, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x06, + 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x1a, 0x46, 0x0a, 0x0c, 0x4d, 0x75, 0x6c, 0x74, 0x69, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x36, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x73, 0x1a, 0x50, + 0x0a, 0x10, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3c, 0x0a, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, + 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x73, + 0x1a, 0xa8, 0x01, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x1c, + 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x41, 0x0a, 0x08, + 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x25, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x6d, 0x6f, + 0x76, 0x65, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x4f, 
0x70, 0x65, + 0x72, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x08, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x22, + 0x3a, 0x0a, 0x08, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x06, 0x0a, 0x02, 0x45, + 0x71, 0x10, 0x00, 0x12, 0x06, 0x0a, 0x02, 0x4e, 0x65, 0x10, 0x01, 0x12, 0x06, 0x0a, 0x02, 0x47, + 0x65, 0x10, 0x02, 0x12, 0x06, 0x0a, 0x02, 0x47, 0x74, 0x10, 0x03, 0x12, 0x06, 0x0a, 0x02, 0x4c, + 0x65, 0x10, 0x04, 0x12, 0x06, 0x0a, 0x02, 0x4c, 0x74, 0x10, 0x05, 0x1a, 0x5d, 0x0a, 0x06, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x35, 0x0a, 0x17, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x73, 0x74, + 0x72, 0x69, 0x63, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x5f, 0x63, 0x68, 0x65, 0x63, 0x6b, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x14, 0x73, 0x6b, 0x69, 0x70, 0x53, 0x74, 0x72, 0x69, + 0x63, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x12, 0x1c, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x12, 0x0a, 0x05, 0x46, 0x6c, + 0x75, 0x73, 0x68, 0x1a, 0x09, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0xb1, + 0x0b, 0x0a, 0x06, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x75, 0x0a, 0x0d, 0x56, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x02, 0x69, 0x64, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, 0x44, 0x42, 0x08, 0xba, + 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x02, 0x69, 0x64, 0x12, 0x33, 0x0a, 0x07, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x46, 0x69, 0x6c, 0x74, 0x65, 0x72, + 0x2e, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x07, 0x66, 0x69, 0x6c, 0x74, 0x65, 0x72, 0x73, + 0x1a, 0x36, 0x0a, 0x08, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x0e, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, + 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x02, 0x52, 0x08, + 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x39, 0x0a, 0x08, 0x64, + 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, + 0x74, 0x2e, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x08, 0x64, 0x69, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, + 0x1d, 0x0a, 0x02, 0x49, 0x44, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x1a, 0x17, + 0x0a, 0x03, 0x49, 0x44, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x03, 0x69, 0x64, 0x73, 0x1a, 0x61, 0x0a, 0x06, 0x56, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x12, 
0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, + 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x20, 0x0a, 0x06, 0x76, 0x65, + 0x63, 0x74, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x03, 0x28, 0x02, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, + 0x01, 0x02, 0x08, 0x02, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x0a, 0x09, + 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x1a, 0x43, 0x0a, 0x10, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x49, + 0x44, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x02, 0x52, 0x02, 0x69, 0x64, 0x1a, + 0x42, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x17, 0x0a, 0x02, + 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, + 0x01, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, + 0x61, 0x6d, 0x70, 0x1a, 0x3e, 0x0a, 0x07, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x07, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x52, 0x07, 0x76, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x73, 0x1a, 0x7c, 0x0a, 0x0c, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x56, 0x65, 0x63, + 0x74, 0x6f, 0x72, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, 0x00, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x3a, 0x0a, 0x12, - 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, 0xba, 0x48, 0x04, 0x2a, 0x02, 0x28, 0x00, 0x52, 0x08, - 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, 0x0a, 0x0a, 0x44, 0x69, 0x73, 0x63, - 0x6f, 0x76, 0x65, 0x72, 0x65, 0x72, 0x1a, 0x58, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, - 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, - 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, - 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 
0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, - 0x22, 0xc2, 0x2b, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x80, 0x20, 0x0a, 0x05, 0x49, 0x6e, - 0x64, 0x65, 0x78, 0x1a, 0x75, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, 0x74, - 0x6f, 0x72, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, - 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x75, 0x6e, 0x63, 0x6f, 0x6d, - 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x69, - 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x69, - 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x08, 0x52, 0x06, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xdf, 0x01, 0x0a, 0x06, 0x44, - 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x41, 0x0a, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, - 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, 0x65, - 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, - 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, 0x69, - 0x63, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x76, 0x65, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x73, 0x1a, 0x57, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, - 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6f, 0x75, 0x6e, - 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, 0x04, - 0x55, 0x55, 0x49, 0x44, 0x1a, 0x1f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, - 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x21, 0x0a, 0x0b, 0x55, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, - 0x74, 0x74, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x9d, 0x0d, 0x0a, 0x0a, 0x53, 0x74, 0x61, - 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x27, 0x0a, - 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x49, 0x6e, - 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, - 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, - 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 
0x62, 0x65, 0x72, 0x5f, - 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, - 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, - 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x33, 0x0a, - 0x16, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, - 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, 0x6d, - 0x69, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, - 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x07, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x69, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, - 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, 0x64, - 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, - 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x25, - 0x0a, 0x0e, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x6f, 0x64, 0x65, 0x4f, 0x75, 0x74, 0x64, - 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x3a, 0x0a, 0x1a, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x73, - 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, 0x64, - 0x67, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x6f, 0x64, 0x65, 0x73, - 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, - 0x73, 0x12, 0x4c, 0x0a, 0x23, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, 0x70, - 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, - 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1f, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x49, - 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x65, 0x64, 0x67, - 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, - 0x4f, 0x66, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, - 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x62, - 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, - 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, - 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x1d, 0x6e, 0x75, - 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x77, 0x69, - 
0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, - 0x04, 0x52, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, - 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x46, 0x0a, 0x20, - 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, - 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, - 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, - 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x49, 0x6e, 0x64, 0x65, - 0x67, 0x72, 0x65, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, - 0x66, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x72, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x12, 0x20, - 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x73, - 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, - 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, - 0x73, 0x69, 0x7a, 0x65, 0x4f, 0x66, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x24, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, - 0x66, 0x5f, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x62, 0x6a, - 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x04, 0x52, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x4f, 0x66, 0x52, 0x65, 0x66, 0x69, - 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, 0x6f, - 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, - 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x15, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x4f, 0x66, - 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x76, 0x61, 0x72, 0x69, - 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, - 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x13, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, - 0x65, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x28, 0x0a, 0x10, - 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x18, 0x17, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x6d, 0x65, 0x61, 0x6e, 0x45, 0x64, 0x67, 0x65, - 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x3f, 0x0a, 0x1d, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x65, - 0x64, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, - 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x01, 0x52, 0x18, 0x6d, - 0x65, 0x61, 0x6e, 0x45, 0x64, 0x67, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x46, 0x6f, 0x72, - 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x23, 0x6d, 0x65, 0x61, 0x6e, 0x5f, - 0x69, 0x6e, 0x64, 0x65, 
0x67, 0x72, 0x65, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x19, - 0x20, 0x01, 0x28, 0x01, 0x52, 0x1e, 0x6d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, - 0x65, 0x65, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x31, 0x30, 0x45, - 0x64, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x1d, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, - 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, - 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x65, 0x61, - 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x64, 0x67, 0x65, 0x73, 0x50, 0x65, - 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x31, 0x5f, 0x69, 0x6e, 0x64, 0x65, - 0x67, 0x72, 0x65, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x31, 0x49, 0x6e, - 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x35, 0x5f, 0x69, 0x6e, 0x64, - 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x35, 0x49, - 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x39, 0x35, 0x5f, 0x6f, - 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, - 0x63, 0x39, 0x35, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, - 0x63, 0x39, 0x39, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1e, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x39, 0x39, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, - 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0d, 0x69, 0x6e, 0x64, 0x65, 0x67, - 0x72, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x6f, 0x75, 0x74, 0x64, - 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, - 0x20, 0x20, 0x03, 0x28, 0x04, 0x52, 0x12, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, - 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x2d, 0x0a, 0x12, 0x69, 0x6e, 0x64, - 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, - 0x21, 0x20, 0x03, 0x28, 0x04, 0x52, 0x11, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0xc1, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x61, - 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x4e, 0x0a, - 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x34, - 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, - 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x5d, 0x0a, - 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, - 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, - 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x21, - 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, - 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 
0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, - 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xaf, 0x0c, 0x0a, - 0x08, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, 0x6d, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, 0x69, - 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x65, 0x61, - 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x05, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, - 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x64, 0x65, 0x78, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, 0x64, - 0x65, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, - 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, - 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x18, - 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, 0x69, - 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x61, - 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x76, - 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x41, 0x64, - 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, 0x6c, - 0x12, 0x37, 0x0a, 0x18, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, - 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x15, 0x67, 0x72, 0x61, 0x70, 0x68, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, - 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x74, 0x72, 0x65, - 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, - 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x74, 0x72, 0x65, 0x65, - 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, - 0x12, 0x39, 0x0a, 0x19, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, - 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0b, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x16, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x68, 0x61, 0x72, 0x65, - 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x70, - 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x0c, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x4f, 0x66, - 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 
0x0c, 0x70, 0x72, 0x65, - 0x66, 0x65, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, 0x63, - 0x75, 0x72, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x54, 0x79, 0x70, - 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x61, 0x67, 0x6e, 0x69, 0x74, 0x75, - 0x64, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x4d, 0x61, 0x67, - 0x6e, 0x69, 0x74, 0x75, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x22, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, - 0x65, 0x69, 0x67, 0x68, 0x62, 0x6f, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x73, - 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x11, 0x20, 0x01, - 0x28, 0x05, 0x52, 0x1d, 0x6e, 0x4f, 0x66, 0x4e, 0x65, 0x69, 0x67, 0x68, 0x62, 0x6f, 0x72, 0x73, + 0x64, 0x1a, 0x3d, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x68, 0x61, 0x70, 0x65, 0x56, 0x65, 0x63, 0x74, + 0x6f, 0x72, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x68, + 0x61, 0x70, 0x65, 0x18, 0x02, 0x20, 0x03, 0x28, 0x05, 0x52, 0x05, 0x73, 0x68, 0x61, 0x70, 0x65, + 0x1a, 0x37, 0x0a, 0x04, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x02, 0x69, + 0x64, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x06, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x1a, 0x74, 0x0a, 0x0a, 0x53, 0x74, 0x72, + 0x65, 0x61, 0x6d, 0x42, 0x6c, 0x6f, 0x62, 0x12, 0x2d, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, + 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x42, 0x6c, 0x6f, 0x62, 0x48, 0x00, + 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, + 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, + 0x44, 0x0a, 0x08, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, + 0x75, 0x69, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x03, 0x69, 0x70, 0x73, 0x1a, 0x84, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x08, 0x6c, 0x6f, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, 0x61, 0x79, + 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, + 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x48, 0x00, 0x52, 0x08, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, + 
0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x1a, 0x46, 0x0a, 0x09, + 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x39, 0x0a, 0x09, 0x6c, 0x6f, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x6c, 0x6f, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x8b, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x73, 0x74, 0x1a, 0x09, 0x0a, + 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x78, 0x0a, 0x08, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x56, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x48, + 0x00, 0x52, 0x06, 0x76, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, + 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x42, 0x09, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x22, 0x45, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x1a, 0x3a, 0x0a, + 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x12, 0x24, 0x0a, 0x09, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x42, 0x07, 0xba, 0x48, 0x04, 0x2a, 0x02, 0x28, 0x00, 0x52, + 0x08, 0x70, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x22, 0x66, 0x0a, 0x0a, 0x44, 0x69, 0x73, + 0x63, 0x6f, 0x76, 0x65, 0x72, 0x65, 0x72, 0x1a, 0x58, 0x0a, 0x07, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x07, 0xba, 0x48, 0x04, 0x72, 0x02, 0x10, 0x01, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x64, + 0x65, 0x22, 0xc2, 0x2b, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x1a, 0x80, 0x20, 0x0a, 0x05, 0x49, + 0x6e, 0x64, 0x65, 0x78, 0x1a, 0x75, 0x0a, 0x05, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x16, 0x0a, + 0x06, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x64, 0x12, 0x20, 0x0a, 0x0b, 0x75, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, 0x69, + 0x74, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0b, 0x75, 0x6e, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x69, 0x6e, 0x64, 0x65, 0x78, + 0x69, 0x6e, 0x67, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x06, 0x73, 0x61, 0x76, 0x69, 0x6e, 0x67, 0x1a, 0xdf, 0x01, 0x0a, 0x06, + 0x44, 0x65, 0x74, 0x61, 
0x69, 0x6c, 0x12, 0x41, 0x0a, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, 0x74, 0x72, + 0x79, 0x52, 0x06, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x70, + 0x6c, 0x69, 0x63, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x07, 0x72, 0x65, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x76, 0x65, 0x5f, 0x61, 0x67, 0x65, 0x6e, + 0x74, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x0a, 0x6c, 0x69, 0x76, 0x65, 0x41, 0x67, + 0x65, 0x6e, 0x74, 0x73, 0x1a, 0x57, 0x0a, 0x0b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x32, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x43, 0x6f, 0x75, + 0x6e, 0x74, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4a, 0x0a, + 0x04, 0x55, 0x55, 0x49, 0x44, 0x1a, 0x1f, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, + 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x21, 0x0a, 0x0b, 0x55, 0x6e, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x74, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x75, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x75, 0x69, 0x64, 0x1a, 0x9d, 0x0d, 0x0a, 0x0a, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x27, + 0x0a, 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x49, + 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x6d, 0x65, 0x64, 0x69, 0x61, + 0x6e, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0f, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x6e, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, + 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x33, + 0x0a, 0x16, 0x6d, 0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, + 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x04, 0x52, 0x13, + 0x6d, 0x69, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x6d, 
0x69, 0x6e, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x14, 0x6d, 0x69, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, + 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x6f, + 0x64, 0x65, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, + 0x04, 0x52, 0x0c, 0x6d, 0x6f, 0x64, 0x65, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, + 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x64, 0x65, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, + 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6d, 0x6f, 0x64, 0x65, 0x4f, 0x75, 0x74, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x3a, 0x0a, 0x1a, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, + 0x73, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, + 0x64, 0x67, 0x65, 0x73, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x6f, 0x64, 0x65, + 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x46, 0x6f, 0x72, 0x31, 0x30, 0x45, 0x64, 0x67, + 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x23, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x73, 0x6b, 0x69, 0x70, + 0x70, 0x65, 0x64, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, + 0x5f, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x1f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x53, 0x6b, 0x69, 0x70, 0x70, 0x65, 0x64, 0x46, 0x6f, 0x72, + 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x65, 0x64, + 0x67, 0x65, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x4f, 0x66, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x4f, 0x66, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, + 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0d, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x1d, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x5f, 0x77, + 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x0f, 0x20, 0x01, + 0x28, 0x04, 0x52, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4e, 0x6f, 0x64, 0x65, + 0x73, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x46, 0x0a, + 0x20, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x73, + 0x5f, 0x77, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, + 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x04, 0x52, 0x1c, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, + 0x66, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x57, 0x69, 0x74, 0x68, 0x6f, 0x75, 0x74, 0x49, 0x6e, 0x64, + 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, + 0x6f, 0x66, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x0f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4f, 0x62, 
0x6a, 0x65, 0x63, 0x74, + 0x73, 0x12, 0x39, 0x0a, 0x19, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x16, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x52, 0x65, + 0x6d, 0x6f, 0x76, 0x65, 0x64, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x12, 0x39, 0x0a, 0x19, + 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, + 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, 0x13, 0x20, 0x01, 0x28, 0x04, 0x52, + 0x16, 0x73, 0x69, 0x7a, 0x65, 0x4f, 0x66, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x4e, 0x0a, 0x24, 0x73, 0x69, 0x7a, 0x65, 0x5f, + 0x6f, 0x66, 0x5f, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, 0x62, + 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x04, 0x52, 0x20, 0x73, 0x69, 0x7a, 0x65, 0x4f, 0x66, 0x52, 0x65, 0x66, + 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x65, 0x70, + 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x30, 0x0a, 0x14, 0x76, 0x61, 0x72, 0x69, 0x61, + 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, + 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x12, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, 0x63, 0x65, 0x4f, + 0x66, 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x32, 0x0a, 0x15, 0x76, 0x61, 0x72, + 0x69, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6f, 0x66, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x13, 0x76, 0x61, 0x72, 0x69, 0x61, 0x6e, + 0x63, 0x65, 0x4f, 0x66, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x28, 0x0a, + 0x10, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x17, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0e, 0x6d, 0x65, 0x61, 0x6e, 0x45, 0x64, 0x67, + 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x3f, 0x0a, 0x1d, 0x6d, 0x65, 0x61, 0x6e, 0x5f, + 0x65, 0x64, 0x67, 0x65, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x5f, 0x66, 0x6f, 0x72, 0x5f, + 0x31, 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, 0x18, 0x20, 0x01, 0x28, 0x01, 0x52, 0x18, + 0x6d, 0x65, 0x61, 0x6e, 0x45, 0x64, 0x67, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x46, 0x6f, + 0x72, 0x31, 0x30, 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x4b, 0x0a, 0x23, 0x6d, 0x65, 0x61, 0x6e, + 0x5f, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x31, 0x30, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1e, 0x6d, 0x65, 0x61, 0x6e, 0x49, 0x6e, 0x64, 0x65, 0x67, + 0x72, 0x65, 0x65, 0x44, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x46, 0x6f, 0x72, 0x31, 0x30, + 0x45, 0x64, 0x67, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x1d, 0x6d, 0x65, 0x61, 0x6e, 0x5f, 0x6e, 0x75, + 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x73, 0x5f, 0x70, 0x65, + 0x72, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x18, 0x6d, 0x65, + 0x61, 0x6e, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x45, 0x64, 0x67, 0x65, 0x73, 0x50, + 0x65, 0x72, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x31, 0x5f, 0x69, 0x6e, 0x64, + 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x31, 0x49, + 
0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x35, 0x5f, 0x69, 0x6e, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x63, 0x35, + 0x49, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x39, 0x35, 0x5f, + 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1d, 0x20, 0x01, 0x28, 0x01, 0x52, + 0x0c, 0x63, 0x39, 0x35, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x12, 0x23, 0x0a, + 0x0d, 0x63, 0x39, 0x39, 0x5f, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x18, 0x1e, + 0x20, 0x01, 0x28, 0x01, 0x52, 0x0c, 0x63, 0x39, 0x39, 0x4f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, + 0x65, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x03, 0x52, 0x0d, 0x69, 0x6e, 0x64, 0x65, + 0x67, 0x72, 0x65, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2f, 0x0a, 0x13, 0x6f, 0x75, 0x74, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, + 0x18, 0x20, 0x20, 0x03, 0x28, 0x04, 0x52, 0x12, 0x6f, 0x75, 0x74, 0x64, 0x65, 0x67, 0x72, 0x65, + 0x65, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x2d, 0x0a, 0x12, 0x69, 0x6e, + 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, 0x5f, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, + 0x18, 0x21, 0x20, 0x03, 0x28, 0x04, 0x52, 0x11, 0x69, 0x6e, 0x64, 0x65, 0x67, 0x72, 0x65, 0x65, + 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x1a, 0xc1, 0x01, 0x0a, 0x10, 0x53, 0x74, + 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, 0x63, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x12, 0x4e, + 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x34, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, 0x5d, + 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x37, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x21, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x69, 0x73, 0x74, 0x69, + 0x63, 0x73, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xaf, 0x0c, + 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x12, 0x1c, 0x0a, 0x09, 0x64, 0x69, + 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x09, 0x64, + 0x69, 0x6d, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x68, 0x72, 0x65, + 0x61, 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x0e, 0x74, 0x68, 0x72, 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x69, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, + 0x74, 0x79, 0x70, 0x65, 
0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x69, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x69, 0x6e, 0x64, 0x65, + 0x78, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x69, 0x6e, + 0x64, 0x65, 0x78, 0x54, 0x79, 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x61, 0x74, 0x61, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, + 0x64, 0x61, 0x74, 0x61, 0x62, 0x61, 0x73, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x29, 0x0a, 0x10, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x61, 0x6c, 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, + 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x41, 0x6c, + 0x69, 0x67, 0x6e, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x18, 0x70, 0x61, 0x74, 0x68, 0x5f, + 0x61, 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x6e, 0x74, 0x65, 0x72, + 0x76, 0x61, 0x6c, 0x18, 0x08, 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x41, + 0x64, 0x6a, 0x75, 0x73, 0x74, 0x6d, 0x65, 0x6e, 0x74, 0x49, 0x6e, 0x74, 0x65, 0x72, 0x76, 0x61, + 0x6c, 0x12, 0x37, 0x0a, 0x18, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, + 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x09, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x15, 0x67, 0x72, 0x61, 0x70, 0x68, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, + 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x74, 0x72, + 0x65, 0x65, 0x5f, 0x73, 0x68, 0x61, 0x72, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x74, 0x72, 0x65, + 0x65, 0x53, 0x68, 0x61, 0x72, 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, + 0x65, 0x12, 0x39, 0x0a, 0x19, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x73, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x05, 0x52, 0x16, 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x53, 0x68, 0x61, 0x72, + 0x65, 0x64, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, + 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x5f, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0e, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x4f, + 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x72, 0x65, 0x66, 0x65, 0x74, 0x63, + 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x70, 0x72, + 0x65, 0x66, 0x65, 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x63, + 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x0e, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0d, 0x61, 0x63, 0x63, 0x75, 0x72, 0x61, 0x63, 0x79, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x61, 0x67, 0x6e, 0x69, 0x74, + 0x75, 0x64, 0x65, 0x18, 0x10, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0c, 0x6d, 0x61, 0x78, 0x4d, 0x61, + 0x67, 0x6e, 0x69, 0x74, 0x75, 0x64, 0x65, 0x12, 0x49, 0x0a, 0x22, 0x6e, 0x5f, 0x6f, 0x66, 0x5f, + 0x6e, 0x65, 0x69, 0x67, 0x68, 0x62, 0x6f, 0x72, 0x73, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x69, 0x6e, + 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 
0x6f, 0x72, 0x64, 0x65, 0x72, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x1d, 0x6e, 0x4f, 0x66, 0x4e, 0x65, 0x69, 0x67, 0x68, 0x62, 0x6f, 0x72, + 0x73, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x72, 0x64, + 0x65, 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x70, 0x73, 0x69, 0x6c, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, + 0x72, 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x72, 0x64, 0x65, + 0x72, 0x18, 0x12, 0x20, 0x01, 0x28, 0x02, 0x52, 0x18, 0x65, 0x70, 0x73, 0x69, 0x6c, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x72, 0x64, 0x65, - 0x72, 0x12, 0x3d, 0x0a, 0x1b, 0x65, 0x70, 0x73, 0x69, 0x6c, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, - 0x5f, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, - 0x18, 0x12, 0x20, 0x01, 0x28, 0x02, 0x52, 0x18, 0x65, 0x70, 0x73, 0x69, 0x6c, 0x6f, 0x6e, 0x46, - 0x6f, 0x72, 0x49, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x4f, 0x72, 0x64, 0x65, 0x72, - 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, 0x6f, - 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x14, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x62, 0x6a, 0x65, - 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x14, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x64, 0x67, - 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x65, 0x64, 0x67, 0x65, 0x53, - 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, - 0x0a, 0x14, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, - 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x18, 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x65, 0x64, - 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, 0x12, - 0x3e, 0x0a, 0x1c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, 0x6d, - 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, - 0x17, 0x20, 0x01, 0x28, 0x05, 0x52, 0x18, 0x65, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, - 0x40, 0x0a, 0x1c, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, 0x64, - 0x69, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, 0x18, - 0x18, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x61, 0x64, 0x69, 0x75, 0x73, 0x43, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, - 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x19, - 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1b, - 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x1a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x1b, 0x74, - 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 
0x72, 0x65, 0x61, 0x64, - 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x18, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, 0x65, - 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x62, 0x61, - 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x62, 0x61, 0x74, - 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, - 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x61, 0x70, 0x68, 0x54, 0x79, 0x70, 0x65, - 0x12, 0x33, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x64, 0x67, 0x65, - 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, - 0x65, 0x42, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, - 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, 0x18, - 0x1f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x64, - 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x20, - 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x4c, - 0x69, 0x6d, 0x69, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, - 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6f, 0x75, 0x74, - 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x63, - 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, 0x05, - 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x1a, 0xbb, - 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, - 0x6c, 0x12, 0x4c, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, - 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x1a, - 0x5b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, - 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, - 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x1f, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xef, 0x01, 0x0a, - 0x03, 0x50, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 
0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, - 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, 0xba, - 0x48, 0x04, 0x72, 0x02, 0x78, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x26, 0x0a, 0x03, 0x63, 0x70, - 0x75, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, - 0x70, 0x75, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, - 0x6f, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, - 0x6e, 0x66, 0x6f, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x1a, 0xe8, - 0x01, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, - 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, - 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, - 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, - 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x2f, 0x0a, - 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x29, - 0x0a, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, - 0x6f, 0x64, 0x73, 0x52, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x1a, 0x82, 0x02, 0x0a, 0x07, 0x53, 0x65, - 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x6f, 0x72, - 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, - 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, - 0x63, 0x65, 0x50, 0x6f, 
0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x2f, 0x0a, - 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x3e, - 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, - 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x35, - 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, - 0x04, 0x70, 0x6f, 0x72, 0x74, 0x1a, 0x80, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x12, 0x3b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x23, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, - 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, - 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, - 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x9e, 0x01, 0x0a, 0x0b, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, 0x2e, + 0x72, 0x12, 0x34, 0x0a, 0x16, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x5f, + 0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x14, 0x72, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x4f, 0x62, 0x6a, + 0x65, 0x63, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x74, 0x72, 0x75, 0x6e, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x33, 0x0a, 0x16, 0x65, 0x64, + 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, 0x65, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x15, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x65, 0x64, 0x67, 0x65, + 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x2f, 0x0a, 0x14, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, + 0x5f, 0x73, 0x65, 0x61, 0x72, 0x63, 0x68, 0x18, 0x16, 0x20, 0x01, 0x28, 0x05, 0x52, 0x11, 0x65, + 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x53, 0x65, 0x61, 0x72, 0x63, 0x68, + 0x12, 0x3e, 0x0a, 0x1c, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x6c, 0x69, + 0x6d, 0x69, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 
0x63, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x17, 0x20, 0x01, 0x28, 0x05, 0x52, 0x18, 0x65, 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x12, 0x40, 0x0a, 0x1c, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x72, 0x61, + 0x64, 0x69, 0x75, 0x73, 0x5f, 0x63, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, 0x6e, 0x74, + 0x18, 0x18, 0x20, 0x01, 0x28, 0x01, 0x52, 0x1a, 0x69, 0x6e, 0x73, 0x65, 0x72, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x61, 0x64, 0x69, 0x75, 0x73, 0x43, 0x6f, 0x65, 0x66, 0x66, 0x69, 0x63, 0x69, 0x65, + 0x6e, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x53, 0x69, 0x7a, 0x65, 0x12, + 0x1b, 0x0a, 0x09, 0x73, 0x65, 0x65, 0x64, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x1a, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x73, 0x65, 0x65, 0x64, 0x54, 0x79, 0x70, 0x65, 0x12, 0x3d, 0x0a, 0x1b, + 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x61, + 0x64, 0x5f, 0x70, 0x6f, 0x6f, 0x6c, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x1b, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x18, 0x74, 0x72, 0x75, 0x6e, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x54, 0x68, 0x72, + 0x65, 0x61, 0x64, 0x50, 0x6f, 0x6f, 0x6c, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x35, 0x0a, 0x17, 0x62, + 0x61, 0x74, 0x63, 0x68, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x66, 0x6f, 0x72, 0x5f, 0x63, 0x72, + 0x65, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x05, 0x52, 0x14, 0x62, 0x61, + 0x74, 0x63, 0x68, 0x53, 0x69, 0x7a, 0x65, 0x46, 0x6f, 0x72, 0x43, 0x72, 0x65, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x12, 0x1d, 0x0a, 0x0a, 0x67, 0x72, 0x61, 0x70, 0x68, 0x5f, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x67, 0x72, 0x61, 0x70, 0x68, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x33, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x5f, 0x65, 0x64, 0x67, + 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x62, 0x61, 0x73, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, 0x64, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x42, 0x61, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x16, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, + 0x63, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x5f, 0x72, 0x61, 0x74, 0x65, + 0x18, 0x1f, 0x20, 0x01, 0x28, 0x05, 0x52, 0x13, 0x64, 0x79, 0x6e, 0x61, 0x6d, 0x69, 0x63, 0x45, + 0x64, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x52, 0x61, 0x74, 0x65, 0x12, 0x28, 0x0a, 0x10, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, + 0x20, 0x20, 0x01, 0x28, 0x02, 0x52, 0x0e, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x23, 0x0a, 0x0d, 0x6f, 0x75, 0x74, 0x67, 0x6f, 0x69, 0x6e, + 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18, 0x21, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6f, 0x75, + 0x74, 0x67, 0x6f, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, + 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x5f, 0x65, 0x64, 0x67, 0x65, 0x18, 0x22, 0x20, 0x01, 0x28, + 0x05, 0x52, 0x0c, 0x69, 0x6e, 0x63, 0x6f, 0x6d, 0x69, 0x6e, 0x67, 0x45, 0x64, 0x67, 0x65, 0x1a, + 0xbb, 0x01, 0x0a, 0x0e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, + 0x69, 0x6c, 0x12, 0x4c, 0x0a, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x32, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 
0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x79, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x69, + 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07, 0x64, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, + 0x1a, 0x5b, 0x0a, 0x0c, 0x44, 0x65, 0x74, 0x61, 0x69, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x35, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x49, 0x6e, 0x64, 0x65, 0x78, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0xef, 0x01, + 0x0a, 0x03, 0x50, 0x6f, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x61, 0x70, 0x70, 0x5f, 0x6e, 0x61, 0x6d, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, + 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x12, 0x17, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x42, 0x07, + 0xba, 0x48, 0x04, 0x72, 0x02, 0x78, 0x01, 0x52, 0x02, 0x69, 0x70, 0x12, 0x26, 0x0a, 0x03, 0x63, + 0x70, 0x75, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, + 0x63, 0x70, 0x75, 0x12, 0x2f, 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, + 0x6d, 0x6f, 0x72, 0x79, 0x12, 0x29, 0x0a, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x18, 0x07, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x6e, 0x6f, 0x64, 0x65, 0x1a, + 0xe8, 0x01, 0x0a, 0x04, 0x4e, 0x6f, 0x64, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x41, 0x64, 0x64, + 0x72, 0x12, 0x23, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x61, 0x64, + 0x64, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x41, 0x64, 0x64, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x63, 0x70, 0x75, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, + 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x43, 0x50, 0x55, 0x52, 0x03, 0x63, 0x70, 0x75, 0x12, 0x2f, + 0x0a, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x52, 0x06, 0x6d, 0x65, 0x6d, 0x6f, 0x72, 0x79, 0x12, + 
0x29, 0x0a, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, - 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, - 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, 0x6e, - 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, 0x6e, - 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, + 0x50, 0x6f, 0x64, 0x73, 0x52, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x1a, 0x82, 0x02, 0x0a, 0x07, 0x53, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x12, 0x1f, 0x0a, 0x0b, 0x63, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x70, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, + 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x49, 0x70, 0x73, 0x12, 0x32, 0x0a, 0x05, 0x70, 0x6f, + 0x72, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, + 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, + 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x52, 0x05, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x12, 0x2f, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, + 0x3e, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, + 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x52, 0x0b, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, + 0x35, 0x0a, 0x0b, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, + 0x52, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x1a, 0x80, 0x01, 0x0a, 0x06, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x12, 0x3b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x23, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, + 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, + 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x03, 0x43, 0x50, 0x55, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 
0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x4e, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, 0x79, - 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x0a, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x12, 0x32, - 0x0a, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, - 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x50, - 0x6f, 0x64, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x04, 0x70, 0x6f, - 0x64, 0x73, 0x1a, 0x3e, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x05, 0x6e, - 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4e, 0x6f, 0x64, - 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x05, 0x6e, 0x6f, 0x64, - 0x65, 0x73, 0x1a, 0x4a, 0x0a, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, 0x3e, - 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, - 0x66, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, - 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, 0x15, - 0x0a, 0x03, 0x49, 0x50, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x70, 0x22, 0x7a, 0x0a, 0x06, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, 0x1a, - 0x30, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, - 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, - 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, 0x72, - 0x74, 0x1a, 0x3e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x33, 0x0a, 0x07, - 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x69, 0x72, 0x72, 0x6f, - 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x17, 0x0a, 0x03, 0x4b, 0x65, - 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, - 0x6b, 0x65, 0x79, 0x1a, 0x33, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, - 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x60, 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x03, 0x6b, 
0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4d, - 0x65, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, 0x05, - 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x61, - 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x56, 0x61, - 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x42, 0x64, 0x0a, 0x1d, 0x6f, 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, - 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x0b, 0x56, 0x61, 0x6c, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0xa2, - 0x02, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x9e, 0x01, 0x0a, 0x0b, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x0b, 0x61, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2d, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, + 0x2e, 0x41, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x41, 0x6e, 0x6e, + 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0b, 0x61, + 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x3e, 0x0a, 0x10, 0x41, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, + 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, + 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x1a, 0x4b, 0x0a, 0x03, 0x43, 0x50, + 0x55, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x4e, 0x0a, 0x06, 0x4d, 0x65, 0x6d, 0x6f, 0x72, + 0x79, 0x12, 0x14, 0x0a, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x14, 0x0a, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x05, 0x75, 0x73, 0x61, 0x67, 0x65, 0x1a, 0x3a, 0x0a, 0x04, 0x50, 0x6f, 0x64, 0x73, 0x12, + 0x32, 0x0a, 0x04, 0x70, 0x6f, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, + 0x50, 0x6f, 0x64, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 
0x01, 0x52, 0x04, 0x70, + 0x6f, 0x64, 0x73, 0x1a, 0x3e, 0x0a, 0x05, 0x4e, 0x6f, 0x64, 0x65, 0x73, 0x12, 0x35, 0x0a, 0x05, + 0x6e, 0x6f, 0x64, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4e, 0x6f, + 0x64, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x05, 0x6e, 0x6f, + 0x64, 0x65, 0x73, 0x1a, 0x4a, 0x0a, 0x08, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x12, + 0x3e, 0x0a, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x66, 0x6f, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x08, 0xba, 0x48, 0x05, + 0x92, 0x01, 0x02, 0x08, 0x01, 0x52, 0x08, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x73, 0x1a, + 0x15, 0x0a, 0x03, 0x49, 0x50, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x70, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x70, 0x22, 0x7a, 0x0a, 0x06, 0x4d, 0x69, 0x72, 0x72, 0x6f, 0x72, + 0x1a, 0x30, 0x0a, 0x06, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, + 0x73, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, + 0x0a, 0x04, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x04, 0x70, 0x6f, + 0x72, 0x74, 0x1a, 0x3e, 0x0a, 0x07, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x12, 0x33, 0x0a, + 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, + 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x69, 0x72, 0x72, + 0x6f, 0x72, 0x2e, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, + 0x74, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x04, 0x4d, 0x65, 0x74, 0x61, 0x1a, 0x17, 0x0a, 0x03, 0x4b, + 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x1a, 0x33, 0x0a, 0x05, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, + 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x1a, 0x60, 0x0a, 0x08, 0x4b, 0x65, 0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x26, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x4b, 0x65, 0x79, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x2c, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x2e, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x07, 0x0a, 0x05, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x42, 0x64, 0x0a, 0x1d, 0x6f, 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, + 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x70, 0x61, + 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x42, 0x0b, 0x56, 0x61, 0x6c, 0x64, 0x50, 0x61, 0x79, 0x6c, 0x6f, + 0x61, 0x64, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, 0x73, + 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, + 
0xa2, 0x02, 0x07, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( @@ -6535,7 +6658,7 @@ func file_v1_payload_payload_proto_rawDescGZIP() []byte { var ( file_v1_payload_payload_proto_enumTypes = make([]protoimpl.EnumInfo, 2) - file_v1_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 100) + file_v1_payload_payload_proto_msgTypes = make([]protoimpl.MessageInfo, 101) file_v1_payload_payload_proto_goTypes = []any{ (Search_AggregationAlgorithm)(0), // 0: payload.v1.Search.AggregationAlgorithm (Remove_Timestamp_Operator)(0), // 1: payload.v1.Remove.Timestamp.Operator @@ -6574,76 +6697,78 @@ var ( (*Update_MultiRequest)(nil), // 34: payload.v1.Update.MultiRequest (*Update_ObjectRequest)(nil), // 35: payload.v1.Update.ObjectRequest (*Update_MultiObjectRequest)(nil), // 36: payload.v1.Update.MultiObjectRequest - (*Update_Config)(nil), // 37: payload.v1.Update.Config - (*Upsert_Request)(nil), // 38: payload.v1.Upsert.Request - (*Upsert_MultiRequest)(nil), // 39: payload.v1.Upsert.MultiRequest - (*Upsert_ObjectRequest)(nil), // 40: payload.v1.Upsert.ObjectRequest - (*Upsert_MultiObjectRequest)(nil), // 41: payload.v1.Upsert.MultiObjectRequest - (*Upsert_Config)(nil), // 42: payload.v1.Upsert.Config - (*Remove_Request)(nil), // 43: payload.v1.Remove.Request - (*Remove_MultiRequest)(nil), // 44: payload.v1.Remove.MultiRequest - (*Remove_TimestampRequest)(nil), // 45: payload.v1.Remove.TimestampRequest - (*Remove_Timestamp)(nil), // 46: payload.v1.Remove.Timestamp - (*Remove_Config)(nil), // 47: payload.v1.Remove.Config - (*Flush_Request)(nil), // 48: payload.v1.Flush.Request - (*Object_VectorRequest)(nil), // 49: payload.v1.Object.VectorRequest - (*Object_Distance)(nil), // 50: payload.v1.Object.Distance - (*Object_StreamDistance)(nil), // 51: payload.v1.Object.StreamDistance - (*Object_ID)(nil), // 52: payload.v1.Object.ID - (*Object_IDs)(nil), // 53: payload.v1.Object.IDs - (*Object_Vector)(nil), // 54: payload.v1.Object.Vector - (*Object_TimestampRequest)(nil), // 55: payload.v1.Object.TimestampRequest - (*Object_Timestamp)(nil), // 56: payload.v1.Object.Timestamp - (*Object_Vectors)(nil), // 57: payload.v1.Object.Vectors - (*Object_StreamVector)(nil), // 58: payload.v1.Object.StreamVector - (*Object_ReshapeVector)(nil), // 59: payload.v1.Object.ReshapeVector - (*Object_Blob)(nil), // 60: payload.v1.Object.Blob - (*Object_StreamBlob)(nil), // 61: payload.v1.Object.StreamBlob - (*Object_Location)(nil), // 62: payload.v1.Object.Location - (*Object_StreamLocation)(nil), // 63: payload.v1.Object.StreamLocation - (*Object_Locations)(nil), // 64: payload.v1.Object.Locations - (*Object_List)(nil), // 65: payload.v1.Object.List - (*Object_List_Request)(nil), // 66: payload.v1.Object.List.Request - (*Object_List_Response)(nil), // 67: payload.v1.Object.List.Response - (*Control_CreateIndexRequest)(nil), // 68: payload.v1.Control.CreateIndexRequest - (*Discoverer_Request)(nil), // 69: payload.v1.Discoverer.Request - (*Info_Index)(nil), // 70: payload.v1.Info.Index - (*Info_Pod)(nil), // 71: payload.v1.Info.Pod - (*Info_Node)(nil), // 72: payload.v1.Info.Node - (*Info_Service)(nil), // 73: payload.v1.Info.Service - (*Info_ServicePort)(nil), // 74: payload.v1.Info.ServicePort - (*Info_Labels)(nil), // 75: payload.v1.Info.Labels - (*Info_Annotations)(nil), // 76: payload.v1.Info.Annotations - (*Info_CPU)(nil), // 77: payload.v1.Info.CPU - (*Info_Memory)(nil), // 78: payload.v1.Info.Memory - (*Info_Pods)(nil), // 79: payload.v1.Info.Pods - 
(*Info_Nodes)(nil), // 80: payload.v1.Info.Nodes - (*Info_Services)(nil), // 81: payload.v1.Info.Services - (*Info_IPs)(nil), // 82: payload.v1.Info.IPs - (*Info_Index_Count)(nil), // 83: payload.v1.Info.Index.Count - (*Info_Index_Detail)(nil), // 84: payload.v1.Info.Index.Detail - (*Info_Index_UUID)(nil), // 85: payload.v1.Info.Index.UUID - (*Info_Index_Statistics)(nil), // 86: payload.v1.Info.Index.Statistics - (*Info_Index_StatisticsDetail)(nil), // 87: payload.v1.Info.Index.StatisticsDetail - (*Info_Index_Property)(nil), // 88: payload.v1.Info.Index.Property - (*Info_Index_PropertyDetail)(nil), // 89: payload.v1.Info.Index.PropertyDetail - nil, // 90: payload.v1.Info.Index.Detail.CountsEntry - (*Info_Index_UUID_Committed)(nil), // 91: payload.v1.Info.Index.UUID.Committed - (*Info_Index_UUID_Uncommitted)(nil), // 92: payload.v1.Info.Index.UUID.Uncommitted - nil, // 93: payload.v1.Info.Index.StatisticsDetail.DetailsEntry - nil, // 94: payload.v1.Info.Index.PropertyDetail.DetailsEntry - nil, // 95: payload.v1.Info.Labels.LabelsEntry - nil, // 96: payload.v1.Info.Annotations.AnnotationsEntry - (*Mirror_Target)(nil), // 97: payload.v1.Mirror.Target - (*Mirror_Targets)(nil), // 98: payload.v1.Mirror.Targets - (*Meta_Key)(nil), // 99: payload.v1.Meta.Key - (*Meta_Value)(nil), // 100: payload.v1.Meta.Value - (*Meta_KeyValue)(nil), // 101: payload.v1.Meta.KeyValue - (*wrapperspb.FloatValue)(nil), // 102: google.protobuf.FloatValue - (*status.Status)(nil), // 103: google.rpc.Status - (*anypb.Any)(nil), // 104: google.protobuf.Any + (*Update_TimestampRequest)(nil), // 37: payload.v1.Update.TimestampRequest + (*Update_Config)(nil), // 38: payload.v1.Update.Config + (*Upsert_Request)(nil), // 39: payload.v1.Upsert.Request + (*Upsert_MultiRequest)(nil), // 40: payload.v1.Upsert.MultiRequest + (*Upsert_ObjectRequest)(nil), // 41: payload.v1.Upsert.ObjectRequest + (*Upsert_MultiObjectRequest)(nil), // 42: payload.v1.Upsert.MultiObjectRequest + (*Upsert_Config)(nil), // 43: payload.v1.Upsert.Config + (*Remove_Request)(nil), // 44: payload.v1.Remove.Request + (*Remove_MultiRequest)(nil), // 45: payload.v1.Remove.MultiRequest + (*Remove_TimestampRequest)(nil), // 46: payload.v1.Remove.TimestampRequest + (*Remove_Timestamp)(nil), // 47: payload.v1.Remove.Timestamp + (*Remove_Config)(nil), // 48: payload.v1.Remove.Config + (*Flush_Request)(nil), // 49: payload.v1.Flush.Request + (*Object_VectorRequest)(nil), // 50: payload.v1.Object.VectorRequest + (*Object_Distance)(nil), // 51: payload.v1.Object.Distance + (*Object_StreamDistance)(nil), // 52: payload.v1.Object.StreamDistance + (*Object_ID)(nil), // 53: payload.v1.Object.ID + (*Object_IDs)(nil), // 54: payload.v1.Object.IDs + (*Object_Vector)(nil), // 55: payload.v1.Object.Vector + (*Object_TimestampRequest)(nil), // 56: payload.v1.Object.TimestampRequest + (*Object_Timestamp)(nil), // 57: payload.v1.Object.Timestamp + (*Object_Vectors)(nil), // 58: payload.v1.Object.Vectors + (*Object_StreamVector)(nil), // 59: payload.v1.Object.StreamVector + (*Object_ReshapeVector)(nil), // 60: payload.v1.Object.ReshapeVector + (*Object_Blob)(nil), // 61: payload.v1.Object.Blob + (*Object_StreamBlob)(nil), // 62: payload.v1.Object.StreamBlob + (*Object_Location)(nil), // 63: payload.v1.Object.Location + (*Object_StreamLocation)(nil), // 64: payload.v1.Object.StreamLocation + (*Object_Locations)(nil), // 65: payload.v1.Object.Locations + (*Object_List)(nil), // 66: payload.v1.Object.List + (*Object_List_Request)(nil), // 67: payload.v1.Object.List.Request + 
(*Object_List_Response)(nil), // 68: payload.v1.Object.List.Response + (*Control_CreateIndexRequest)(nil), // 69: payload.v1.Control.CreateIndexRequest + (*Discoverer_Request)(nil), // 70: payload.v1.Discoverer.Request + (*Info_Index)(nil), // 71: payload.v1.Info.Index + (*Info_Pod)(nil), // 72: payload.v1.Info.Pod + (*Info_Node)(nil), // 73: payload.v1.Info.Node + (*Info_Service)(nil), // 74: payload.v1.Info.Service + (*Info_ServicePort)(nil), // 75: payload.v1.Info.ServicePort + (*Info_Labels)(nil), // 76: payload.v1.Info.Labels + (*Info_Annotations)(nil), // 77: payload.v1.Info.Annotations + (*Info_CPU)(nil), // 78: payload.v1.Info.CPU + (*Info_Memory)(nil), // 79: payload.v1.Info.Memory + (*Info_Pods)(nil), // 80: payload.v1.Info.Pods + (*Info_Nodes)(nil), // 81: payload.v1.Info.Nodes + (*Info_Services)(nil), // 82: payload.v1.Info.Services + (*Info_IPs)(nil), // 83: payload.v1.Info.IPs + (*Info_Index_Count)(nil), // 84: payload.v1.Info.Index.Count + (*Info_Index_Detail)(nil), // 85: payload.v1.Info.Index.Detail + (*Info_Index_UUID)(nil), // 86: payload.v1.Info.Index.UUID + (*Info_Index_Statistics)(nil), // 87: payload.v1.Info.Index.Statistics + (*Info_Index_StatisticsDetail)(nil), // 88: payload.v1.Info.Index.StatisticsDetail + (*Info_Index_Property)(nil), // 89: payload.v1.Info.Index.Property + (*Info_Index_PropertyDetail)(nil), // 90: payload.v1.Info.Index.PropertyDetail + nil, // 91: payload.v1.Info.Index.Detail.CountsEntry + (*Info_Index_UUID_Committed)(nil), // 92: payload.v1.Info.Index.UUID.Committed + (*Info_Index_UUID_Uncommitted)(nil), // 93: payload.v1.Info.Index.UUID.Uncommitted + nil, // 94: payload.v1.Info.Index.StatisticsDetail.DetailsEntry + nil, // 95: payload.v1.Info.Index.PropertyDetail.DetailsEntry + nil, // 96: payload.v1.Info.Labels.LabelsEntry + nil, // 97: payload.v1.Info.Annotations.AnnotationsEntry + (*Mirror_Target)(nil), // 98: payload.v1.Mirror.Target + (*Mirror_Targets)(nil), // 99: payload.v1.Mirror.Targets + (*Meta_Key)(nil), // 100: payload.v1.Meta.Key + (*Meta_Value)(nil), // 101: payload.v1.Meta.Value + (*Meta_KeyValue)(nil), // 102: payload.v1.Meta.KeyValue + (*wrapperspb.FloatValue)(nil), // 103: google.protobuf.FloatValue + (*status.Status)(nil), // 104: google.rpc.Status + (*anypb.Any)(nil), // 105: google.protobuf.Any } ) + var file_v1_payload_payload_proto_depIdxs = []int32{ 22, // 0: payload.v1.Search.Request.config:type_name -> payload.v1.Search.Config 16, // 1: payload.v1.Search.MultiRequest.requests:type_name -> payload.v1.Search.Request @@ -6655,80 +6780,80 @@ var file_v1_payload_payload_proto_depIdxs = []int32{ 27, // 7: payload.v1.Search.Config.ingress_filters:type_name -> payload.v1.Filter.Config 27, // 8: payload.v1.Search.Config.egress_filters:type_name -> payload.v1.Filter.Config 0, // 9: payload.v1.Search.Config.aggregation_algorithm:type_name -> payload.v1.Search.AggregationAlgorithm - 102, // 10: payload.v1.Search.Config.ratio:type_name -> google.protobuf.FloatValue - 50, // 11: payload.v1.Search.Response.results:type_name -> payload.v1.Object.Distance + 103, // 10: payload.v1.Search.Config.ratio:type_name -> google.protobuf.FloatValue + 51, // 11: payload.v1.Search.Response.results:type_name -> payload.v1.Object.Distance 23, // 12: payload.v1.Search.Responses.responses:type_name -> payload.v1.Search.Response 23, // 13: payload.v1.Search.StreamResponse.response:type_name -> payload.v1.Search.Response - 103, // 14: payload.v1.Search.StreamResponse.status:type_name -> google.rpc.Status + 104, // 14: 
payload.v1.Search.StreamResponse.status:type_name -> google.rpc.Status 26, // 15: payload.v1.Filter.Config.targets:type_name -> payload.v1.Filter.Target - 54, // 16: payload.v1.Insert.Request.vector:type_name -> payload.v1.Object.Vector + 55, // 16: payload.v1.Insert.Request.vector:type_name -> payload.v1.Object.Vector 32, // 17: payload.v1.Insert.Request.config:type_name -> payload.v1.Insert.Config 28, // 18: payload.v1.Insert.MultiRequest.requests:type_name -> payload.v1.Insert.Request - 60, // 19: payload.v1.Insert.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 61, // 19: payload.v1.Insert.ObjectRequest.object:type_name -> payload.v1.Object.Blob 32, // 20: payload.v1.Insert.ObjectRequest.config:type_name -> payload.v1.Insert.Config 26, // 21: payload.v1.Insert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target 30, // 22: payload.v1.Insert.MultiObjectRequest.requests:type_name -> payload.v1.Insert.ObjectRequest 27, // 23: payload.v1.Insert.Config.filters:type_name -> payload.v1.Filter.Config - 54, // 24: payload.v1.Update.Request.vector:type_name -> payload.v1.Object.Vector - 37, // 25: payload.v1.Update.Request.config:type_name -> payload.v1.Update.Config + 55, // 24: payload.v1.Update.Request.vector:type_name -> payload.v1.Object.Vector + 38, // 25: payload.v1.Update.Request.config:type_name -> payload.v1.Update.Config 33, // 26: payload.v1.Update.MultiRequest.requests:type_name -> payload.v1.Update.Request - 60, // 27: payload.v1.Update.ObjectRequest.object:type_name -> payload.v1.Object.Blob - 37, // 28: payload.v1.Update.ObjectRequest.config:type_name -> payload.v1.Update.Config + 61, // 27: payload.v1.Update.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 38, // 28: payload.v1.Update.ObjectRequest.config:type_name -> payload.v1.Update.Config 26, // 29: payload.v1.Update.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target 35, // 30: payload.v1.Update.MultiObjectRequest.requests:type_name -> payload.v1.Update.ObjectRequest 27, // 31: payload.v1.Update.Config.filters:type_name -> payload.v1.Filter.Config - 54, // 32: payload.v1.Upsert.Request.vector:type_name -> payload.v1.Object.Vector - 42, // 33: payload.v1.Upsert.Request.config:type_name -> payload.v1.Upsert.Config - 38, // 34: payload.v1.Upsert.MultiRequest.requests:type_name -> payload.v1.Upsert.Request - 60, // 35: payload.v1.Upsert.ObjectRequest.object:type_name -> payload.v1.Object.Blob - 42, // 36: payload.v1.Upsert.ObjectRequest.config:type_name -> payload.v1.Upsert.Config + 55, // 32: payload.v1.Upsert.Request.vector:type_name -> payload.v1.Object.Vector + 43, // 33: payload.v1.Upsert.Request.config:type_name -> payload.v1.Upsert.Config + 39, // 34: payload.v1.Upsert.MultiRequest.requests:type_name -> payload.v1.Upsert.Request + 61, // 35: payload.v1.Upsert.ObjectRequest.object:type_name -> payload.v1.Object.Blob + 43, // 36: payload.v1.Upsert.ObjectRequest.config:type_name -> payload.v1.Upsert.Config 26, // 37: payload.v1.Upsert.ObjectRequest.vectorizer:type_name -> payload.v1.Filter.Target - 40, // 38: payload.v1.Upsert.MultiObjectRequest.requests:type_name -> payload.v1.Upsert.ObjectRequest + 41, // 38: payload.v1.Upsert.MultiObjectRequest.requests:type_name -> payload.v1.Upsert.ObjectRequest 27, // 39: payload.v1.Upsert.Config.filters:type_name -> payload.v1.Filter.Config - 52, // 40: payload.v1.Remove.Request.id:type_name -> payload.v1.Object.ID - 47, // 41: payload.v1.Remove.Request.config:type_name -> payload.v1.Remove.Config - 43, // 42: 
payload.v1.Remove.MultiRequest.requests:type_name -> payload.v1.Remove.Request - 46, // 43: payload.v1.Remove.TimestampRequest.timestamps:type_name -> payload.v1.Remove.Timestamp + 53, // 40: payload.v1.Remove.Request.id:type_name -> payload.v1.Object.ID + 48, // 41: payload.v1.Remove.Request.config:type_name -> payload.v1.Remove.Config + 44, // 42: payload.v1.Remove.MultiRequest.requests:type_name -> payload.v1.Remove.Request + 47, // 43: payload.v1.Remove.TimestampRequest.timestamps:type_name -> payload.v1.Remove.Timestamp 1, // 44: payload.v1.Remove.Timestamp.operator:type_name -> payload.v1.Remove.Timestamp.Operator - 52, // 45: payload.v1.Object.VectorRequest.id:type_name -> payload.v1.Object.ID + 53, // 45: payload.v1.Object.VectorRequest.id:type_name -> payload.v1.Object.ID 27, // 46: payload.v1.Object.VectorRequest.filters:type_name -> payload.v1.Filter.Config - 50, // 47: payload.v1.Object.StreamDistance.distance:type_name -> payload.v1.Object.Distance - 103, // 48: payload.v1.Object.StreamDistance.status:type_name -> google.rpc.Status - 52, // 49: payload.v1.Object.TimestampRequest.id:type_name -> payload.v1.Object.ID - 54, // 50: payload.v1.Object.Vectors.vectors:type_name -> payload.v1.Object.Vector - 54, // 51: payload.v1.Object.StreamVector.vector:type_name -> payload.v1.Object.Vector - 103, // 52: payload.v1.Object.StreamVector.status:type_name -> google.rpc.Status - 60, // 53: payload.v1.Object.StreamBlob.blob:type_name -> payload.v1.Object.Blob - 103, // 54: payload.v1.Object.StreamBlob.status:type_name -> google.rpc.Status - 62, // 55: payload.v1.Object.StreamLocation.location:type_name -> payload.v1.Object.Location - 103, // 56: payload.v1.Object.StreamLocation.status:type_name -> google.rpc.Status - 62, // 57: payload.v1.Object.Locations.locations:type_name -> payload.v1.Object.Location - 54, // 58: payload.v1.Object.List.Response.vector:type_name -> payload.v1.Object.Vector - 103, // 59: payload.v1.Object.List.Response.status:type_name -> google.rpc.Status - 77, // 60: payload.v1.Info.Pod.cpu:type_name -> payload.v1.Info.CPU - 78, // 61: payload.v1.Info.Pod.memory:type_name -> payload.v1.Info.Memory - 72, // 62: payload.v1.Info.Pod.node:type_name -> payload.v1.Info.Node - 77, // 63: payload.v1.Info.Node.cpu:type_name -> payload.v1.Info.CPU - 78, // 64: payload.v1.Info.Node.memory:type_name -> payload.v1.Info.Memory - 79, // 65: payload.v1.Info.Node.Pods:type_name -> payload.v1.Info.Pods - 74, // 66: payload.v1.Info.Service.ports:type_name -> payload.v1.Info.ServicePort - 75, // 67: payload.v1.Info.Service.labels:type_name -> payload.v1.Info.Labels - 76, // 68: payload.v1.Info.Service.annotations:type_name -> payload.v1.Info.Annotations - 95, // 69: payload.v1.Info.Labels.labels:type_name -> payload.v1.Info.Labels.LabelsEntry - 96, // 70: payload.v1.Info.Annotations.annotations:type_name -> payload.v1.Info.Annotations.AnnotationsEntry - 71, // 71: payload.v1.Info.Pods.pods:type_name -> payload.v1.Info.Pod - 72, // 72: payload.v1.Info.Nodes.nodes:type_name -> payload.v1.Info.Node - 73, // 73: payload.v1.Info.Services.services:type_name -> payload.v1.Info.Service - 90, // 74: payload.v1.Info.Index.Detail.counts:type_name -> payload.v1.Info.Index.Detail.CountsEntry - 93, // 75: payload.v1.Info.Index.StatisticsDetail.details:type_name -> payload.v1.Info.Index.StatisticsDetail.DetailsEntry - 94, // 76: payload.v1.Info.Index.PropertyDetail.details:type_name -> payload.v1.Info.Index.PropertyDetail.DetailsEntry - 83, // 77: 
payload.v1.Info.Index.Detail.CountsEntry.value:type_name -> payload.v1.Info.Index.Count - 86, // 78: payload.v1.Info.Index.StatisticsDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Statistics - 88, // 79: payload.v1.Info.Index.PropertyDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Property - 97, // 80: payload.v1.Mirror.Targets.targets:type_name -> payload.v1.Mirror.Target - 104, // 81: payload.v1.Meta.Value.value:type_name -> google.protobuf.Any - 99, // 82: payload.v1.Meta.KeyValue.key:type_name -> payload.v1.Meta.Key - 100, // 83: payload.v1.Meta.KeyValue.value:type_name -> payload.v1.Meta.Value + 51, // 47: payload.v1.Object.StreamDistance.distance:type_name -> payload.v1.Object.Distance + 104, // 48: payload.v1.Object.StreamDistance.status:type_name -> google.rpc.Status + 53, // 49: payload.v1.Object.TimestampRequest.id:type_name -> payload.v1.Object.ID + 55, // 50: payload.v1.Object.Vectors.vectors:type_name -> payload.v1.Object.Vector + 55, // 51: payload.v1.Object.StreamVector.vector:type_name -> payload.v1.Object.Vector + 104, // 52: payload.v1.Object.StreamVector.status:type_name -> google.rpc.Status + 61, // 53: payload.v1.Object.StreamBlob.blob:type_name -> payload.v1.Object.Blob + 104, // 54: payload.v1.Object.StreamBlob.status:type_name -> google.rpc.Status + 63, // 55: payload.v1.Object.StreamLocation.location:type_name -> payload.v1.Object.Location + 104, // 56: payload.v1.Object.StreamLocation.status:type_name -> google.rpc.Status + 63, // 57: payload.v1.Object.Locations.locations:type_name -> payload.v1.Object.Location + 55, // 58: payload.v1.Object.List.Response.vector:type_name -> payload.v1.Object.Vector + 104, // 59: payload.v1.Object.List.Response.status:type_name -> google.rpc.Status + 78, // 60: payload.v1.Info.Pod.cpu:type_name -> payload.v1.Info.CPU + 79, // 61: payload.v1.Info.Pod.memory:type_name -> payload.v1.Info.Memory + 73, // 62: payload.v1.Info.Pod.node:type_name -> payload.v1.Info.Node + 78, // 63: payload.v1.Info.Node.cpu:type_name -> payload.v1.Info.CPU + 79, // 64: payload.v1.Info.Node.memory:type_name -> payload.v1.Info.Memory + 80, // 65: payload.v1.Info.Node.Pods:type_name -> payload.v1.Info.Pods + 75, // 66: payload.v1.Info.Service.ports:type_name -> payload.v1.Info.ServicePort + 76, // 67: payload.v1.Info.Service.labels:type_name -> payload.v1.Info.Labels + 77, // 68: payload.v1.Info.Service.annotations:type_name -> payload.v1.Info.Annotations + 96, // 69: payload.v1.Info.Labels.labels:type_name -> payload.v1.Info.Labels.LabelsEntry + 97, // 70: payload.v1.Info.Annotations.annotations:type_name -> payload.v1.Info.Annotations.AnnotationsEntry + 72, // 71: payload.v1.Info.Pods.pods:type_name -> payload.v1.Info.Pod + 73, // 72: payload.v1.Info.Nodes.nodes:type_name -> payload.v1.Info.Node + 74, // 73: payload.v1.Info.Services.services:type_name -> payload.v1.Info.Service + 91, // 74: payload.v1.Info.Index.Detail.counts:type_name -> payload.v1.Info.Index.Detail.CountsEntry + 94, // 75: payload.v1.Info.Index.StatisticsDetail.details:type_name -> payload.v1.Info.Index.StatisticsDetail.DetailsEntry + 95, // 76: payload.v1.Info.Index.PropertyDetail.details:type_name -> payload.v1.Info.Index.PropertyDetail.DetailsEntry + 84, // 77: payload.v1.Info.Index.Detail.CountsEntry.value:type_name -> payload.v1.Info.Index.Count + 87, // 78: payload.v1.Info.Index.StatisticsDetail.DetailsEntry.value:type_name -> payload.v1.Info.Index.Statistics + 89, // 79: payload.v1.Info.Index.PropertyDetail.DetailsEntry.value:type_name -> 
payload.v1.Info.Index.Property + 98, // 80: payload.v1.Mirror.Targets.targets:type_name -> payload.v1.Mirror.Target + 105, // 81: payload.v1.Meta.Value.value:type_name -> google.protobuf.Any + 100, // 82: payload.v1.Meta.KeyValue.key:type_name -> payload.v1.Meta.Key + 101, // 83: payload.v1.Meta.KeyValue.value:type_name -> payload.v1.Meta.Value 84, // [84:84] is the sub-list for method output_type 84, // [84:84] is the sub-list for method input_type 84, // [84:84] is the sub-list for extension type_name @@ -6745,23 +6870,23 @@ func file_v1_payload_payload_proto_init() { (*Search_StreamResponse_Response)(nil), (*Search_StreamResponse_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[49].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[50].OneofWrappers = []any{ (*Object_StreamDistance_Distance)(nil), (*Object_StreamDistance_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[56].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[57].OneofWrappers = []any{ (*Object_StreamVector_Vector)(nil), (*Object_StreamVector_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[59].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[60].OneofWrappers = []any{ (*Object_StreamBlob_Blob)(nil), (*Object_StreamBlob_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[61].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[62].OneofWrappers = []any{ (*Object_StreamLocation_Location)(nil), (*Object_StreamLocation_Status)(nil), } - file_v1_payload_payload_proto_msgTypes[65].OneofWrappers = []any{ + file_v1_payload_payload_proto_msgTypes[66].OneofWrappers = []any{ (*Object_List_Response_Vector)(nil), (*Object_List_Response_Status)(nil), } @@ -6771,7 +6896,7 @@ func file_v1_payload_payload_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_v1_payload_payload_proto_rawDesc, NumEnums: 2, - NumMessages: 100, + NumMessages: 101, NumExtensions: 0, NumServices: 0, }, diff --git a/apis/grpc/v1/payload/payload.pb.json.go b/apis/grpc/v1/payload/payload.pb.json.go index f719a1325a..f257750f07 100644 --- a/apis/grpc/v1/payload/payload.pb.json.go +++ b/apis/grpc/v1/payload/payload.pb.json.go @@ -271,6 +271,16 @@ func (msg *Update_MultiObjectRequest) UnmarshalJSON(b []byte) error { return protojson.UnmarshalOptions{}.Unmarshal(b, msg) } +// MarshalJSON implements json.Marshaler +func (msg *Update_TimestampRequest) MarshalJSON() ([]byte, error) { + return protojson.MarshalOptions{}.Marshal(msg) +} + +// UnmarshalJSON implements json.Unmarshaler +func (msg *Update_TimestampRequest) UnmarshalJSON(b []byte) error { + return protojson.UnmarshalOptions{}.Unmarshal(b, msg) +} + // MarshalJSON implements json.Marshaler func (msg *Update_Config) MarshalJSON() ([]byte, error) { return protojson.MarshalOptions{}.Marshal(msg) diff --git a/apis/grpc/v1/payload/payload_vtproto.pb.go b/apis/grpc/v1/payload/payload_vtproto.pb.go index a0482dc614..7cdfc1dc32 100644 --- a/apis/grpc/v1/payload/payload_vtproto.pb.go +++ b/apis/grpc/v1/payload/payload_vtproto.pb.go @@ -564,6 +564,25 @@ func (m *Update_MultiObjectRequest) CloneMessageVT() proto.Message { return m.CloneVT() } +func (m *Update_TimestampRequest) CloneVT() *Update_TimestampRequest { + if m == nil { + return (*Update_TimestampRequest)(nil) + } + r := new(Update_TimestampRequest) + r.Id = m.Id + r.Timestamp = m.Timestamp + r.Force = m.Force + if len(m.unknownFields) > 0 { + r.unknownFields = make([]byte, len(m.unknownFields)) + copy(r.unknownFields, m.unknownFields) + } + return r +} + 
+func (m *Update_TimestampRequest) CloneMessageVT() proto.Message { + return m.CloneVT() +} + func (m *Update_Config) CloneVT() *Update_Config { if m == nil { return (*Update_Config)(nil) @@ -2905,6 +2924,32 @@ func (this *Update_MultiObjectRequest) EqualMessageVT(thatMsg proto.Message) boo return this.EqualVT(that) } +func (this *Update_TimestampRequest) EqualVT(that *Update_TimestampRequest) bool { + if this == that { + return true + } else if this == nil || that == nil { + return false + } + if this.Id != that.Id { + return false + } + if this.Timestamp != that.Timestamp { + return false + } + if this.Force != that.Force { + return false + } + return string(this.unknownFields) == string(that.unknownFields) +} + +func (this *Update_TimestampRequest) EqualMessageVT(thatMsg proto.Message) bool { + that, ok := thatMsg.(*Update_TimestampRequest) + if !ok { + return false + } + return this.EqualVT(that) +} + func (this *Update_Config) EqualVT(that *Update_Config) bool { if this == that { return true @@ -6476,6 +6521,61 @@ func (m *Update_MultiObjectRequest) MarshalToSizedBufferVT(dAtA []byte) (int, er return len(dAtA) - i, nil } +func (m *Update_TimestampRequest) MarshalVT() (dAtA []byte, err error) { + if m == nil { + return nil, nil + } + size := m.SizeVT() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBufferVT(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Update_TimestampRequest) MarshalToVT(dAtA []byte) (int, error) { + size := m.SizeVT() + return m.MarshalToSizedBufferVT(dAtA[:size]) +} + +func (m *Update_TimestampRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { + if m == nil { + return 0, nil + } + i := len(dAtA) + _ = i + var l int + _ = l + if m.unknownFields != nil { + i -= len(m.unknownFields) + copy(dAtA[i:], m.unknownFields) + } + if m.Force { + i-- + if m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Timestamp != 0 { + i = protohelpers.EncodeVarint(dAtA, i, uint64(m.Timestamp)) + i-- + dAtA[i] = 0x10 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = protohelpers.EncodeVarint(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Update_Config) MarshalVT() (dAtA []byte, err error) { if m == nil { return nil, nil @@ -10975,6 +11075,26 @@ func (m *Update_MultiObjectRequest) SizeVT() (n int) { return n } +func (m *Update_TimestampRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + protohelpers.SizeOfVarint(uint64(l)) + } + if m.Timestamp != 0 { + n += 1 + protohelpers.SizeOfVarint(uint64(m.Timestamp)) + } + if m.Force { + n += 2 + } + n += len(m.unknownFields) + return n +} + func (m *Update_Config) SizeVT() (n int) { if m == nil { return 0 @@ -15243,6 +15363,129 @@ func (m *Update_MultiObjectRequest) UnmarshalVT(dAtA []byte) error { return nil } +func (m *Update_TimestampRequest) UnmarshalVT(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Update_TimestampRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: Update_TimestampRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return protohelpers.ErrInvalidLength + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return protohelpers.ErrInvalidLength + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + m.Timestamp = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Timestamp |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return protohelpers.ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Force = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := protohelpers.Skip(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return protohelpers.ErrInvalidLength + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + func (m *Update_Config) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/apis/grpc/v1/rpc/errdetails/error_details.pb.go b/apis/grpc/v1/rpc/errdetails/error_details.pb.go index 19097b3f77..00db9a7461 100644 --- a/apis/grpc/v1/rpc/errdetails/error_details.pb.go +++ b/apis/grpc/v1/rpc/errdetails/error_details.pb.go @@ -17,7 +17,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/rpc/errdetails/error_details.proto @@ -64,20 +64,23 @@ const ( // } // } type ErrorInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The reason of the error. This is a constant value that identifies the // proximate cause of the error. Error reasons are unique within a particular // domain of errors. This should be at most 63 characters and match a // regular expression of `[A-Z][A-Z0-9_]+[A-Z0-9]`, which represents // UPPER_SNAKE_CASE. - Reason string ` protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` // The logical grouping to which the "reason" belongs. The error domain // is typically the registered service name of the tool or product that // generates the error. Example: "pubsub.googleapis.com". 
If the error is // generated by some common infrastructure, the error domain must be a // globally unique value that identifies the infrastructure. For Google API // infrastructure, the error domain is "googleapis.com". - Domain string ` protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` // Additional structured details about this error. // // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in @@ -86,9 +89,7 @@ type ErrorInfo struct { // {"instanceLimit": "100/request"}, should be returned as, // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of // instances that can be created in a single (batch) request. - Metadata map[string]string ` protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (x *ErrorInfo) Reset() { @@ -156,11 +157,12 @@ func (x *ErrorInfo) GetMetadata() map[string]string { // number of retries have been reached or a maximum retry delay cap has been // reached. type RetryInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Clients should wait at least this long between retrying the same request. - RetryDelay *durationpb.Duration ` protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Clients should wait at least this long between retrying the same request. + RetryDelay *durationpb.Duration `protobuf:"bytes,1,opt,name=retry_delay,json=retryDelay,proto3" json:"retry_delay,omitempty"` } func (x *RetryInfo) Reset() { @@ -202,13 +204,14 @@ func (x *RetryInfo) GetRetryDelay() *durationpb.Duration { // Describes additional debugging info. type DebugInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The stack trace entries indicating where the error occurred. - StackEntries []string ` protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` + StackEntries []string `protobuf:"bytes,1,rep,name=stack_entries,json=stackEntries,proto3" json:"stack_entries,omitempty"` // Additional debugging information provided by the server. - Detail string ` protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Detail string `protobuf:"bytes,2,opt,name=detail,proto3" json:"detail,omitempty"` } func (x *DebugInfo) Reset() { @@ -267,11 +270,12 @@ func (x *DebugInfo) GetDetail() string { // Also see RetryInfo and Help types for other details about handling a // quota failure. type QuotaFailure struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Describes all quota violations. 
- Violations []*QuotaFailure_Violation ` protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all quota violations. + Violations []*QuotaFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` } func (x *QuotaFailure) Reset() { @@ -317,11 +321,12 @@ func (x *QuotaFailure) GetViolations() []*QuotaFailure_Violation { // acknowledged, it could list the terms of service violation in the // PreconditionFailure message. type PreconditionFailure struct { - state protoimpl.MessageState `protogen:"open.v1"` - // Describes all precondition violations. - Violations []*PreconditionFailure_Violation ` protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Describes all precondition violations. + Violations []*PreconditionFailure_Violation `protobuf:"bytes,1,rep,name=violations,proto3" json:"violations,omitempty"` } func (x *PreconditionFailure) Reset() { @@ -364,11 +369,12 @@ func (x *PreconditionFailure) GetViolations() []*PreconditionFailure_Violation { // Describes violations in a client request. This error type focuses on the // syntactic aspects of the request. type BadRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Describes all violations in a client request. - FieldViolations []*BadRequest_FieldViolation ` protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + FieldViolations []*BadRequest_FieldViolation `protobuf:"bytes,1,rep,name=field_violations,json=fieldViolations,proto3" json:"field_violations,omitempty"` } func (x *BadRequest) Reset() { @@ -411,15 +417,16 @@ func (x *BadRequest) GetFieldViolations() []*BadRequest_FieldViolation { // Contains metadata about the request that clients can attach when filing a bug // or providing other forms of feedback. type RequestInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // An opaque string that should only be interpreted by the service generating // it. For example, it can be used to identify requests in the service's logs. - RequestId string ` protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` + RequestId string `protobuf:"bytes,1,opt,name=request_id,json=requestId,proto3" json:"request_id,omitempty"` // Any data that was used to serve this request. For example, an encrypted // stack trace that can be sent back to the service provider for debugging. - ServingData string ` protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + ServingData string `protobuf:"bytes,2,opt,name=serving_data,json=servingData,proto3" json:"serving_data,omitempty"` } func (x *RequestInfo) Reset() { @@ -468,26 +475,27 @@ func (x *RequestInfo) GetServingData() string { // Describes the resource that is being accessed. 
type ResourceInfo struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A name for the type of resource being accessed, e.g. "sql table", // "cloud storage bucket", "file", "Google calendar"; or the type URL // of the resource: e.g. "type.googleapis.com/google.pubsub.v1.Topic". - ResourceType string ` protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` + ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current // error is // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. - ResourceName string ` protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` + ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). // For example, "user:" or "project:". - Owner string ` protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` + Owner string `protobuf:"bytes,3,opt,name=owner,proto3" json:"owner,omitempty"` // Describes what error is encountered when accessing this resource. // For example, updating a cloud project may require the `writer` permission // on the developer console project. - Description string ` protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` } func (x *ResourceInfo) Reset() { @@ -554,11 +562,12 @@ func (x *ResourceInfo) GetDescription() string { // project hasn't enabled the accessed service, this can contain a URL pointing // directly to the right place in the developer console to flip the bit. type Help struct { - state protoimpl.MessageState `protogen:"open.v1"` - // URL(s) pointing to additional information on handling the current error. - Links []*Help_Link ` protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` - unknownFields protoimpl.UnknownFields + state protoimpl.MessageState sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // URL(s) pointing to additional information on handling the current error. + Links []*Help_Link `protobuf:"bytes,1,rep,name=links,proto3" json:"links,omitempty"` } func (x *Help) Reset() { @@ -601,15 +610,16 @@ func (x *Help) GetLinks() []*Help_Link { // Provides a localized error message that is safe to return to the user // which can be attached to an RPC error. type LocalizedMessage struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The locale used following the specification defined at // https://www.rfc-editor.org/rfc/bcp/bcp47.txt. // Examples are: "en-US", "fr-CH", "es-MX" - Locale string ` protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` + Locale string `protobuf:"bytes,1,opt,name=locale,proto3" json:"locale,omitempty"` // The localized error message in the above locale. 
- Message string ` protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` } func (x *LocalizedMessage) Reset() { @@ -659,11 +669,14 @@ func (x *LocalizedMessage) GetMessage() string { // A message type used to describe a single quota violation. For example, a // daily quota or a custom quota that was exceeded. type QuotaFailure_Violation struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The subject on which the quota check failed. // For example, "clientip:" or "project:". - Subject string ` protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` + Subject string `protobuf:"bytes,1,opt,name=subject,proto3" json:"subject,omitempty"` // A description of how the quota check failed. Clients can use this // description to find more about the quota configuration in the service's // public documentation, or find the relevant quota limit to adjust through @@ -671,9 +684,7 @@ type QuotaFailure_Violation struct { // // For example: "Service disabled" or "Daily Limit for read operations // exceeded". - Description string ` protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` } func (x *QuotaFailure_Violation) Reset() { @@ -722,22 +733,23 @@ func (x *QuotaFailure_Violation) GetDescription() string { // A message type used to describe a single precondition failure. type PreconditionFailure_Violation struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // The type of PreconditionFailure. We recommend using a service-specific // enum type to define the supported precondition violation subjects. For // example, "TOS" for "Terms of Service violation". - Type string ` protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The subject, relative to the type, that failed. // For example, "google.com/cloud" relative to the "TOS" type would indicate // which terms of service is being referenced. - Subject string ` protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` + Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` // A description of how the precondition failed. Developers can use this // description to understand how to fix the failure. // // For example: "Terms of service not accepted". - Description string ` protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` } func (x *PreconditionFailure_Violation) Reset() { @@ -793,7 +805,10 @@ func (x *PreconditionFailure_Violation) GetDescription() string { // A message type used to describe a single bad request field. 
type BadRequest_FieldViolation struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // A path that leads to a field in the request body. The value will be a // sequence of dot-separated identifiers that identify a protocol buffer // field. @@ -831,11 +846,9 @@ type BadRequest_FieldViolation struct { // first `emailAddresses` message // - `emailAddresses[3].type[2]` for a violation in the second `type` // value in the third `emailAddresses` message. - Field string ` protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` + Field string `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"` // A description of why the request element is bad. - Description string ` protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` } func (x *BadRequest_FieldViolation) Reset() { @@ -884,13 +897,14 @@ func (x *BadRequest_FieldViolation) GetDescription() string { // Describes a URL link. type Help_Link struct { - state protoimpl.MessageState `protogen:"open.v1"` + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + // Describes what the link offers. - Description string ` protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` // The URL of the link. - Url string ` protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` } func (x *Help_Link) Reset() { @@ -1066,6 +1080,7 @@ var ( (*durationpb.Duration)(nil), // 15: google.protobuf.Duration } ) + var file_v1_rpc_errdetails_error_details_proto_depIdxs = []int32{ 10, // 0: rpc.v1.ErrorInfo.metadata:type_name -> rpc.v1.ErrorInfo.MetadataEntry 15, // 1: rpc.v1.RetryInfo.retry_delay:type_name -> google.protobuf.Duration diff --git a/apis/grpc/v1/vald/filter.pb.go b/apis/grpc/v1/vald/filter.pb.go index 8956b67d6b..0a8aee984c 100644 --- a/apis/grpc/v1/vald/filter.pb.go +++ b/apis/grpc/v1/vald/filter.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/filter.proto diff --git a/apis/grpc/v1/vald/flush.pb.go b/apis/grpc/v1/vald/flush.pb.go index f914fae377..dd66159f02 100644 --- a/apis/grpc/v1/vald/flush.pb.go +++ b/apis/grpc/v1/vald/flush.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/flush.proto diff --git a/apis/grpc/v1/vald/index.pb.go b/apis/grpc/v1/vald/index.pb.go index ffec97eb1c..427d871f74 100644 --- a/apis/grpc/v1/vald/index.pb.go +++ b/apis/grpc/v1/vald/index.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/index.proto diff --git a/apis/grpc/v1/vald/insert.pb.go b/apis/grpc/v1/vald/insert.pb.go index 560ebb9e71..045f01a445 100644 --- a/apis/grpc/v1/vald/insert.pb.go +++ b/apis/grpc/v1/vald/insert.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/insert.proto diff --git a/apis/grpc/v1/vald/object.pb.go b/apis/grpc/v1/vald/object.pb.go index a6ff362111..9ae363216d 100644 --- a/apis/grpc/v1/vald/object.pb.go +++ b/apis/grpc/v1/vald/object.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/object.proto diff --git a/apis/grpc/v1/vald/remove.pb.go b/apis/grpc/v1/vald/remove.pb.go index f9cba0fe50..962a467c7a 100644 --- a/apis/grpc/v1/vald/remove.pb.go +++ b/apis/grpc/v1/vald/remove.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/remove.proto diff --git a/apis/grpc/v1/vald/search.pb.go b/apis/grpc/v1/vald/search.pb.go index 7875a94bd0..cb3c636ce4 100644 --- a/apis/grpc/v1/vald/search.pb.go +++ b/apis/grpc/v1/vald/search.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/search.proto diff --git a/apis/grpc/v1/vald/update.pb.go b/apis/grpc/v1/vald/update.pb.go index 411e600045..4bea4d79cb 100644 --- a/apis/grpc/v1/vald/update.pb.go +++ b/apis/grpc/v1/vald/update.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. 
// versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/update.proto @@ -46,7 +46,7 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x18, 0x76, 0x31, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2f, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, - 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x9f, 0x02, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, + 0x64, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x32, 0x92, 0x03, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x55, 0x0a, 0x06, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x1a, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x2e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, @@ -64,32 +64,42 @@ var file_v1_vald_update_proto_rawDesc = []byte{ 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x1b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x15, 0x3a, 0x01, 0x2a, 0x22, 0x10, 0x2f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, - 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x42, 0x53, 0x0a, 0x1a, 0x6f, 0x72, 0x67, - 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, 0x55, 0x70, 0x64, - 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, 0x61, 0x70, 0x69, - 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x2f, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x12, 0x71, 0x0a, 0x0f, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x23, 0x2e, 0x70, + 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1b, 0x2e, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x2e, 0x76, 0x31, 0x2e, 0x4f, + 0x62, 0x6a, 0x65, 0x63, 0x74, 0x2e, 0x4c, 0x6f, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x16, 0x3a, 0x01, 0x2a, 0x22, 0x11, 0x2f, 0x75, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x42, 0x53, 0x0a, 0x1a, + 0x6f, 0x72, 0x67, 0x2e, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x2e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x31, 0x2e, 0x76, 0x61, 0x6c, 0x64, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x64, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x76, 0x64, 0x61, 0x61, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x64, 0x2f, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, + 0x64, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var file_v1_vald_update_proto_goTypes = []any{ - (*payload.Update_Request)(nil), // 0: payload.v1.Update.Request - (*payload.Update_MultiRequest)(nil), // 1: payload.v1.Update.MultiRequest - (*payload.Object_Location)(nil), // 2: payload.v1.Object.Location - 
(*payload.Object_StreamLocation)(nil), // 3: payload.v1.Object.StreamLocation - (*payload.Object_Locations)(nil), // 4: payload.v1.Object.Locations + (*payload.Update_Request)(nil), // 0: payload.v1.Update.Request + (*payload.Update_MultiRequest)(nil), // 1: payload.v1.Update.MultiRequest + (*payload.Update_TimestampRequest)(nil), // 2: payload.v1.Update.TimestampRequest + (*payload.Object_Location)(nil), // 3: payload.v1.Object.Location + (*payload.Object_StreamLocation)(nil), // 4: payload.v1.Object.StreamLocation + (*payload.Object_Locations)(nil), // 5: payload.v1.Object.Locations } var file_v1_vald_update_proto_depIdxs = []int32{ 0, // 0: vald.v1.Update.Update:input_type -> payload.v1.Update.Request 0, // 1: vald.v1.Update.StreamUpdate:input_type -> payload.v1.Update.Request 1, // 2: vald.v1.Update.MultiUpdate:input_type -> payload.v1.Update.MultiRequest - 2, // 3: vald.v1.Update.Update:output_type -> payload.v1.Object.Location - 3, // 4: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation - 4, // 5: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type + 2, // 3: vald.v1.Update.UpdateTimestamp:input_type -> payload.v1.Update.TimestampRequest + 3, // 4: vald.v1.Update.Update:output_type -> payload.v1.Object.Location + 4, // 5: vald.v1.Update.StreamUpdate:output_type -> payload.v1.Object.StreamLocation + 5, // 6: vald.v1.Update.MultiUpdate:output_type -> payload.v1.Object.Locations + 3, // 7: vald.v1.Update.UpdateTimestamp:output_type -> payload.v1.Object.Location + 4, // [4:8] is the sub-list for method output_type + 0, // [0:4] is the sub-list for method input_type 0, // [0:0] is the sub-list for extension type_name 0, // [0:0] is the sub-list for extension extendee 0, // [0:0] is the sub-list for field type_name diff --git a/apis/grpc/v1/vald/update_vtproto.pb.go b/apis/grpc/v1/vald/update_vtproto.pb.go index 19596ff233..cfef27ea97 100644 --- a/apis/grpc/v1/vald/update_vtproto.pb.go +++ b/apis/grpc/v1/vald/update_vtproto.pb.go @@ -48,6 +48,8 @@ type UpdateClient interface { StreamUpdate(ctx context.Context, opts ...grpc.CallOption) (Update_StreamUpdateClient, error) // A method to update multiple indexed vectors in a single request. MultiUpdate(ctx context.Context, in *payload.Update_MultiRequest, opts ...grpc.CallOption) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. + UpdateTimestamp(ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption) (*payload.Object_Location, error) } type updateClient struct { @@ -113,6 +115,17 @@ func (c *updateClient) MultiUpdate( return out, nil } +func (c *updateClient) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (*payload.Object_Location, error) { + out := new(payload.Object_Location) + err := c.cc.Invoke(ctx, "/vald.v1.Update/UpdateTimestamp", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // UpdateServer is the server API for Update service. // All implementations must embed UnimplementedUpdateServer // for forward compatibility @@ -123,6 +136,8 @@ type UpdateServer interface { StreamUpdate(Update_StreamUpdateServer) error // A method to update multiple indexed vectors in a single request. MultiUpdate(context.Context, *payload.Update_MultiRequest) (*payload.Object_Locations, error) + // A method to update timestamp an indexed vector. 
+ UpdateTimestamp(context.Context, *payload.Update_TimestampRequest) (*payload.Object_Location, error) mustEmbedUnimplementedUpdateServer() } @@ -144,6 +159,12 @@ func (UnimplementedUpdateServer) MultiUpdate( ) (*payload.Object_Locations, error) { return nil, status.Errorf(codes.Unimplemented, "method MultiUpdate not implemented") } + +func (UnimplementedUpdateServer) UpdateTimestamp( + context.Context, *payload.Update_TimestampRequest, +) (*payload.Object_Location, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTimestamp not implemented") +} func (UnimplementedUpdateServer) mustEmbedUnimplementedUpdateServer() {} // UnsafeUpdateServer may be embedded to opt out of forward compatibility for this service. @@ -223,6 +244,26 @@ func _Update_MultiUpdate_Handler( return interceptor(ctx, in, info, handler) } +func _Update_UpdateTimestamp_Handler( + srv any, ctx context.Context, dec func(any) error, interceptor grpc.UnaryServerInterceptor, +) (any, error) { + in := new(payload.Update_TimestampRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UpdateServer).UpdateTimestamp(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/vald.v1.Update/UpdateTimestamp", + } + handler := func(ctx context.Context, req any) (any, error) { + return srv.(UpdateServer).UpdateTimestamp(ctx, req.(*payload.Update_TimestampRequest)) + } + return interceptor(ctx, in, info, handler) +} + // Update_ServiceDesc is the grpc.ServiceDesc for Update service. // It's only intended for direct use with grpc.RegisterService, // and not to be introspected or modified (even as a copy) @@ -238,6 +279,10 @@ var Update_ServiceDesc = grpc.ServiceDesc{ MethodName: "MultiUpdate", Handler: _Update_MultiUpdate_Handler, }, + { + MethodName: "UpdateTimestamp", + Handler: _Update_UpdateTimestamp_Handler, + }, }, Streams: []grpc.StreamDesc{ { diff --git a/apis/grpc/v1/vald/upsert.pb.go b/apis/grpc/v1/vald/upsert.pb.go index 8bf4d4be1b..1a912522dc 100644 --- a/apis/grpc/v1/vald/upsert.pb.go +++ b/apis/grpc/v1/vald/upsert.pb.go @@ -16,7 +16,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.36.2 +// protoc-gen-go v1.35.2 // protoc (unknown) // source: v1/vald/upsert.proto diff --git a/apis/grpc/v1/vald/vald.go b/apis/grpc/v1/vald/vald.go index dd3749c394..e7edaba5f3 100644 --- a/apis/grpc/v1/vald/vald.go +++ b/apis/grpc/v1/vald/vald.go @@ -95,6 +95,7 @@ const ( UpdateObjectRPCName = "UpdateObject" StreamUpdateObjectRPCName = "StreamUpdateObject" MultiUpdateObjectRPCName = "MultiUpdateObject" + UpdateTimestampRPCName = "UpdateTimestamp" UpsertRPCName = "Upsert" StreamUpsertRPCName = "StreamUpsert" diff --git a/apis/proto/v1/mirror/mirror.proto b/apis/proto/v1/mirror/mirror.proto index d2bb1bb39f..cef46c8eac 100644 --- a/apis/proto/v1/mirror/mirror.proto +++ b/apis/proto/v1/mirror/mirror.proto @@ -13,6 +13,7 @@ // See the License for the specific language governing permissions and // limitations under the License. // + syntax = "proto3"; package mirror.v1; @@ -25,9 +26,18 @@ option java_multiple_files = true; option java_outer_classname = "ValdMirror"; option java_package = "org.vdaas.vald.api.v1.mirror"; -// Represent the mirror service. +// Overview +// Mirror Service is responsible for providing the `Register` interface for the Vald Mirror Gateway. service Mirror { - // Register is the RPC to register other mirror servers. 
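The update_vtproto.pb.go and vald.go hunks above register UpdateTimestamp as an additional unary method on the Update service (full method name `/vald.v1.Update/UpdateTimestamp`; the raw descriptor maps it to `POST /update/timestamp`). A minimal client-side sketch, assuming the generated `NewUpdateClient` constructor from this package and a reachable gateway at `localhost:8081` (address, credentials, and error handling are illustrative only):

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
	"github.com/vdaas/vald/apis/grpc/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// The target address is an assumption for this sketch; point it at a Vald LB gateway.
	conn, err := grpc.NewClient("localhost:8081",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// UpdateTimestamp is a plain unary call: Update.TimestampRequest in, Object.Location out.
	loc, err := vald.NewUpdateClient(conn).UpdateTimestamp(ctx, &payload.Update_TimestampRequest{
		Id:        "vec-001",
		Timestamp: time.Now().Unix(), // unit/convention is up to the service; value is illustrative
		Force:     false,
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("updated location: %s", loc.GetUuid())
}
```

A server built before this patch answers this call with `codes.Unimplemented` ("method UpdateTimestamp not implemented"), which is exactly the fallback the UnimplementedUpdateServer stub above provides.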
+ // Overview + // Register RPC is the method to register other Vald Mirror Gateway targets. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 13 | INTERNAL | rpc Register(payload.v1.Mirror.Targets) returns (payload.v1.Mirror.Targets) { option (google.api.http) = { post: "/mirror/register" diff --git a/apis/proto/v1/vald/filter.proto b/apis/proto/v1/vald/filter.proto index f460ad926a..bb5102743d 100644 --- a/apis/proto/v1/vald/filter.proto +++ b/apis/proto/v1/vald/filter.proto @@ -26,9 +26,21 @@ option java_multiple_files = true; option java_outer_classname = "ValdFilter"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Filter service provides ways to connect to Vald through filter. +// Overview +// Filter Server is responsible for providing insert, update, upsert and search interface for `Vald Filter Gateway`. +// +// Vald Filter Gateway forward user request to user-defined ingress/egress filter components allowing user to run custom logic. service Filter { - // A method to search object. + // Overview + // SearchObject RPC is the method to search object(s) similar to request object. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc SearchObject(payload.v1.Search.ObjectRequest) returns (payload.v1.Search.Response) { option (google.api.http) = { post: "/search/object" @@ -36,7 +48,18 @@ service Filter { }; } - // A method to search multiple objects. + // Overview + // StreamSearchObject RPC is the method to search vectors with multi queries(objects) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // By using the bidirectional streaming RPC, the search request can be communicated in any order between client and server. + // Each Search request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc MultiSearchObject(payload.v1.Search.MultiObjectRequest) returns (payload.v1.Search.Responses) { option (google.api.http) = { post: "/search/object/multiple" @@ -44,10 +67,33 @@ service Filter { }; } - // A method to search object by bidirectional streaming. + // Overview + // MultiSearchObject RPC is the method to search objects with multiple objects in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request exceeds the limit. + //
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc StreamSearchObject(stream payload.v1.Search.ObjectRequest) returns (stream payload.v1.Search.StreamResponse) {} - // A method insert object. + // Overview + // InsertObject RPC is the method to insert object through Vald Filter Gateway. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc InsertObject(payload.v1.Insert.ObjectRequest) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/insert/object" @@ -55,10 +101,32 @@ service Filter { }; } - // Represent the streaming RPC to insert object by bidirectional streaming. + // Overview + // StreamInsertObject RPC is the method to add new multiple object using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc). + // + // By using the bidirectional streaming RPC, the insert request can be communicated in any order between client and server. + // Each Insert request and response are independent. + // It's the recommended method to insert a large number of objects. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc StreamInsertObject(stream payload.v1.Insert.ObjectRequest) returns (stream payload.v1.Object.StreamLocation) {} - // A method to insert multiple objects. + // Overview + // MultiInsertObject RPC is the method to add multiple new objects in **1** request. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc MultiInsertObject(payload.v1.Insert.MultiObjectRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/insert/object/multiple" @@ -66,7 +134,16 @@ service Filter { }; } - // A method to update object. + // Overview + // UpdateObject RPC is the method to update a single vector. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc UpdateObject(payload.v1.Update.ObjectRequest) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/update/object" @@ -74,10 +151,37 @@ service Filter { }; } - // A method to update object by bidirectional streaming. + // Overview + // StreamUpdateObject RPC is the method to update multiple objects using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // By using the bidirectional streaming RPC, the update request can be communicated in any order between client and server. + // Each Update request and response are independent. + // It's the recommended method to update the large amount of objects. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc StreamUpdateObject(stream payload.v1.Update.ObjectRequest) returns (stream payload.v1.Object.StreamLocation) {} - // A method to update multiple objects. + // Overview + // MultiUpdateObject is the method to update multiple objects in **1** request. + // + //
+ gRPC has a message size limitation.
+ Please be careful that the size of the request exceeds the limit. +
+ // --- + // Status Code + // + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc MultiUpdateObject(payload.v1.Update.MultiObjectRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/update/object/multiple" @@ -85,7 +189,16 @@ service Filter { }; } - // A method to upsert object. + // Overview + // UpsertObject RPC is the method to update a single object and add a new single object. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc UpsertObject(payload.v1.Upsert.ObjectRequest) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/upsert/object" @@ -93,10 +206,33 @@ service Filter { }; } - // A method to upsert object by bidirectional streaming. + // Overview + // UpsertObject RPC is the method to update a single object and add a new single object. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc StreamUpsertObject(stream payload.v1.Upsert.ObjectRequest) returns (stream payload.v1.Object.StreamLocation) {} - // A method to upsert multiple objects. + // Overview + // MultiUpsertObject is the method to update existing multiple objects and add new multiple objects in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request exceeds the limit. + //
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 13 | INTERNAL | rpc MultiUpsertObject(payload.v1.Upsert.MultiObjectRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/upsert/object/multiple" diff --git a/apis/proto/v1/vald/flush.proto b/apis/proto/v1/vald/flush.proto index 210e227346..23a069195e 100644 --- a/apis/proto/v1/vald/flush.proto +++ b/apis/proto/v1/vald/flush.proto @@ -26,9 +26,19 @@ option java_multiple_files = true; option java_outer_classname = "ValdFlush"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Flush service provides ways to flush all indexed vectors. +// Overview +// Flush Service is responsible for removing all vectors that are indexed and uncommitted in the `vald-agent`. service Flush { - // A method to flush all indexed vector. + // Overview + // Flush RPC is the method to remove all vectors. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | rpc Flush(payload.v1.Flush.Request) returns (payload.v1.Info.Index.Count) { option (google.api.http) = {delete: "/flush"}; } diff --git a/apis/proto/v1/vald/index.proto b/apis/proto/v1/vald/index.proto index efa1396d27..e66b21ab21 100644 --- a/apis/proto/v1/vald/index.proto +++ b/apis/proto/v1/vald/index.proto @@ -26,26 +26,34 @@ option java_multiple_files = true; option java_outer_classname = "ValdIndex"; option java_package = "org.vdaas.vald.api.v1.vald"; +// Overview // Represent the index manager service. service Index { + // Overview // Represent the RPC to get the index information. rpc IndexInfo(payload.v1.Empty) returns (payload.v1.Info.Index.Count) { option (google.api.http).get = "/index/info"; } + + // Overview // Represent the RPC to get the index information for each agents. rpc IndexDetail(payload.v1.Empty) returns (payload.v1.Info.Index.Detail) { option (google.api.http).get = "/index/detail"; } + + // Overview // Represent the RPC to get the index statistics. rpc IndexStatistics(payload.v1.Empty) returns (payload.v1.Info.Index.Statistics) { option (google.api.http).get = "/index/statistics"; } + // Overview // Represent the RPC to get the index statistics for each agents. rpc IndexStatisticsDetail(payload.v1.Empty) returns (payload.v1.Info.Index.StatisticsDetail) { option (google.api.http).get = "/index/statistics/detail"; } + // Overview // Represent the RPC to get the index property. rpc IndexProperty(payload.v1.Empty) returns (payload.v1.Info.Index.PropertyDetail) { option (google.api.http).get = "/index/property"; diff --git a/apis/proto/v1/vald/insert.proto b/apis/proto/v1/vald/insert.proto index ef89c45355..84aea00980 100644 --- a/apis/proto/v1/vald/insert.proto +++ b/apis/proto/v1/vald/insert.proto @@ -26,9 +26,32 @@ option java_multiple_files = true; option java_outer_classname = "ValdInsert"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Insert service provides ways to add new vectors. +// Overview +// Insert Service is responsible for inserting new vectors into the `vald-agent`. service Insert { - // A method to add a new single vector. + // Overview + // Inset RPC is the method to add a new single vector. 
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Insert(payload.v1.Insert.Request) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/insert" @@ -36,10 +59,64 @@ service Insert { }; } - // A method to add new multiple vectors by bidirectional streaming. + // Overview + // StreamInsert RPC is the method to add new multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the insert request can be communicated in any order between client and server. + // Each Insert request and response are independent. + // It's the recommended method to insert a large number of vectors. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamInsert(stream payload.v1.Insert.Request) returns (stream payload.v1.Object.StreamLocation) {} - // A method to add new multiple vectors in a single request. + // Overview + // MultiInsert RPC is the method to add multiple new vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request exceeds the limit. + //
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Request ID is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiInsert(payload.v1.Insert.MultiRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/insert/multiple" diff --git a/apis/proto/v1/vald/object.proto b/apis/proto/v1/vald/object.proto index 9962146176..209f05e618 100644 --- a/apis/proto/v1/vald/object.proto +++ b/apis/proto/v1/vald/object.proto @@ -26,27 +26,110 @@ option java_multiple_files = true; option java_outer_classname = "ValdObject"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Object service provides ways to fetch indexed vectors. +// Overview +// Object Service is responsible for getting inserted vectors and checking whether vectors are inserted into the `vald-agent`. service Object { - // A method to check whether a specified ID is indexed or not. + // Overview + // Exists RPC is the method to check that a vector exists in the `vald-agent`. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. 
| Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Exists(payload.v1.Object.ID) returns (payload.v1.Object.ID) { option (google.api.http).get = "/exists/{id}"; } - // A method to fetch a vector. + // Overview + // GetObject RPC is the method to get the metadata of a vector inserted into the `vald-agent`. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc GetObject(payload.v1.Object.VectorRequest) returns (payload.v1.Object.Vector) { option (google.api.http).get = "/object/{id.id}"; } - // A method to fetch vectors by bidirectional streaming. + // Overview + // StreamGetObject RPC is the method to get the metadata of multiple existing vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the GetObject request can be communicated in any order between client and server. + // Each Upsert request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamGetObject(stream payload.v1.Object.VectorRequest) returns (stream payload.v1.Object.StreamVector) {} + // Overview // A method to get all the vectors with server streaming + // --- + // Status Code + // TODO + // --- + // Troubleshooting + // TODO rpc StreamListObject(payload.v1.Object.List.Request) returns (stream payload.v1.Object.List.Response) { option (google.api.http).get = "/object/list"; } + // Overview // Represent the RPC to get the vector metadata. This RPC is mainly used for index correction process + // --- + // Status Code + // TODO + // --- + // Troubleshooting + // TODO rpc GetTimestamp(payload.v1.Object.TimestampRequest) returns (payload.v1.Object.Timestamp) { option (google.api.http).get = "/object/meta/{id.id}"; } diff --git a/apis/proto/v1/vald/remove.proto b/apis/proto/v1/vald/remove.proto index c0dee5e7bb..41d813b87e 100644 --- a/apis/proto/v1/vald/remove.proto +++ b/apis/proto/v1/vald/remove.proto @@ -26,9 +26,33 @@ option java_multiple_files = true; option java_outer_classname = "ValdRemove"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Remove service provides ways to remove indexed vectors. +// Overview +// Remove Service is responsible for removing vectors indexed in the `vald-agent`. service Remove { - // A method to remove an indexed vector. + // Overview + // Remove RPC is the method to remove a single vector. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. 
+ // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Remove(payload.v1.Remove.Request) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/remove" @@ -36,7 +60,33 @@ service Remove { }; } - // A method to remove an indexed vector based on timestamp. + // Overview + // RemoveByTimestamp RPC is the method to remove vectors based on timestamp. + // + //
+ // In the TimestampRequest message, the 'timestamps' field is repeated, allowing the inclusion of multiple Timestamps.
+ // When multiple Timestamps are provided, they are combined with an `AND` condition, enabling deletions within a specified range.
+ // This design allows for versatile deletion operations, facilitating tasks such as removing data within a specific time range. + //
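As a hedged sketch of the `AND` semantics described above, the snippet below builds a TimestampRequest that removes vectors whose timestamp falls inside [start, end]. The import paths, the `Remove_Timestamp_Ge`/`Remove_Timestamp_Le` constants, and the nanosecond unit are assumptions based on standard protoc-gen-go naming and should be checked against the generated payload package.

```go
// Hypothetical sketch: remove vectors whose timestamp lies in [start, end].
// Type, enum, and package names assume standard protoc-gen-go output for
// these protos; verify them against the generated payload package.
package example

import (
	"context"
	"time"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func removeRange(ctx context.Context, c vald.RemoveClient, start, end time.Time) error {
	req := &payload.Remove_TimestampRequest{
		Timestamps: []*payload.Remove_Timestamp{
			// The two conditions below are ANDed together by the server.
			{Timestamp: start.UnixNano(), Operator: payload.Remove_Timestamp_Ge}, // timestamp >= start
			{Timestamp: end.UnixNano(), Operator: payload.Remove_Timestamp_Le},   // timestamp <= end
		},
	}
	// The timestamp unit (seconds vs. nanoseconds) depends on how vectors were indexed.
	_, err := c.RemoveByTimestamp(ctx, req)
	return err
}
```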
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :------------------------------------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | No vectors in the system match the specified timestamp conditions. | Check whether vectors matching the specified timestamp conditions exist in the system, and fix conditions if needed. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. rpc RemoveByTimestamp(payload.v1.Remove.TimestampRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/remove/timestamp" @@ -44,10 +94,66 @@ service Remove { }; } + // Overview // A method to remove multiple indexed vectors by bidirectional streaming. + // + // StreamRemove RPC is the method to remove multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the remove request can be communicated in any order between client and server. + // Each Remove request and response are independent. + // It's the recommended method to remove a large number of vectors. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamRemove(stream payload.v1.Remove.Request) returns (stream payload.v1.Object.StreamLocation) {} - // A method to remove multiple indexed vectors in a single request. + // Overview + // MultiRemove is the method to remove multiple vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
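When a single MultiRemove would exceed that limit, the options are StreamRemove (recommended above for large workloads) or splitting the ID list into smaller batches. A hedged Go sketch of the batching approach, with type names assumed from the generated client:

```go
// Hypothetical sketch: split a large ID list into smaller MultiRemove batches
// so that each request stays well under the gRPC message size limit.
// Package and type names assume the generated Go client for these protos.
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func multiRemoveChunked(ctx context.Context, c vald.RemoveClient, ids []string, batch int) error {
	for start := 0; start < len(ids); start += batch {
		end := start + batch
		if end > len(ids) {
			end = len(ids)
		}
		reqs := make([]*payload.Remove_Request, 0, end-start)
		for _, id := range ids[start:end] {
			reqs = append(reqs, &payload.Remove_Request{
				Id: &payload.Object_ID{Id: id},
			})
		}
		if _, err := c.MultiRemove(ctx, &payload.Remove_MultiRequest{Requests: reqs}); err != nil {
			return err
		}
	}
	return nil
}
```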
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :---------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiRemove(payload.v1.Remove.MultiRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/remove/multiple" diff --git a/apis/proto/v1/vald/search.proto b/apis/proto/v1/vald/search.proto index fa05726784..29d0ad7c9b 100644 --- a/apis/proto/v1/vald/search.proto +++ b/apis/proto/v1/vald/search.proto @@ -26,9 +26,33 @@ option java_multiple_files = true; option java_outer_classname = "ValdSearch"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Search service provides ways to search indexed vectors. +// Overview +// Search Service is responsible for searching vectors similar to the user request vector from `vald-agent`. service Search { - // A method to search indexed vectors by a raw vector. + // Overview + // Search RPC is the method to search vector(s) similar to the request vector. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. 
| Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Search(payload.v1.Search.Request) returns (payload.v1.Search.Response) { option (google.api.http) = { post: "/search" @@ -36,7 +60,31 @@ service Search { }; } - // A method to search indexed vectors by ID. + // Overview + // SearchByID RPC is the method to search similar vectors using a user-defined vector ID.
+ // The vector with the same requested ID should be indexed into the `vald-agent` before searching. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc SearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) { option (google.api.http) = { post: "/search/id" @@ -44,13 +92,91 @@ service Search { }; } - // A method to search indexed vectors by multiple vectors. + // Overview + // StreamSearch RPC is the method to search vectors with multi queries(vectors) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the search request can be communicated in any order between the client and server. + // Each Search request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamSearch(stream payload.v1.Search.Request) returns (stream payload.v1.Search.StreamResponse) {} - // A method to search indexed vectors by multiple IDs. + // Overview + // StreamSearchByID RPC is the method to search vectors with multi queries(IDs) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the search request can be communicated in any order between the client and server. + // Each SearchByID request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamSearchByID(stream payload.v1.Search.IDRequest) returns (stream payload.v1.Search.StreamResponse) {} - // A method to search indexed vectors by multiple vectors in a single request. + // Overview + // MultiSearch RPC is the method to search vectors with multiple vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
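Several troubleshooting tables in this service suggest lowering `min_num` when `NOT_FOUND` is returned for an otherwise valid search. A hedged Go sketch showing where that knob sits in a plain Search request; the `Search_Config` field names (including `MinNum`) and the timeout unit are assumptions to verify against the generated code.

```go
// Hypothetical sketch: a Search request whose config tolerates fewer results
// than requested (min_num < num), which avoids NOT_FOUND on sparse indexes.
// Field names and the timeout unit assume the generated payload.Search_Config type.
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func search(ctx context.Context, c vald.SearchClient, vec []float32) (*payload.Search_Response, error) {
	return c.Search(ctx, &payload.Search_Request{
		Vector: vec,
		Config: &payload.Search_Config{
			Num:     10,            // desired number of results
			MinNum:  5,             // accept as few as 5 results instead of failing with NOT_FOUND
			Radius:  -1,            // assumed defaults; tune per index and algorithm
			Epsilon: 0.1,
			Timeout: 3_000_000_000, // assumed to be nanoseconds
		},
	})
}
```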
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiSearch(payload.v1.Search.MultiRequest) returns (payload.v1.Search.Responses) { option (google.api.http) = { post: "/search/multiple" @@ -58,7 +184,35 @@ service Search { }; } - // A method to search indexed vectors by multiple IDs in a single request. + // Overview + // MultiSearchByID RPC is the method to search vectors with multiple IDs in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
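Because the *ByID variants return `NOT_FOUND` for IDs that were never indexed, one defensive pattern is to confirm the ID through the Object service's Exists RPC (documented earlier in this patch) before searching. A rough sketch under the same naming assumptions as the previous examples:

```go
// Hypothetical sketch: check that an ID is indexed before searching by it,
// to distinguish "never inserted" from "search result too small".
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func searchByIDIfIndexed(ctx context.Context, oc vald.ObjectClient, sc vald.SearchClient, id string) (*payload.Search_Response, error) {
	if _, err := oc.Exists(ctx, &payload.Object_ID{Id: id}); err != nil {
		return nil, err // typically codes.NotFound when the ID was never inserted
	}
	return sc.SearchByID(ctx, &payload.Search_IDRequest{
		Id:     id,
		Config: &payload.Search_Config{Num: 10},
	})
}
```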
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiSearchByID(payload.v1.Search.MultiIDRequest) returns (payload.v1.Search.Responses) { option (google.api.http) = { post: "/search/id/multiple" @@ -66,7 +220,30 @@ service Search { }; } - // A method to linear search indexed vectors by a raw vector. + // Overview + // LinearSearch RPC is the method to linear search vector(s) similar to the request vector. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. 
| Check target Vald cluster first and check network route including ingress as second. | rpc LinearSearch(payload.v1.Search.Request) returns (payload.v1.Search.Response) { option (google.api.http) = { post: "/linearsearch" @@ -74,7 +251,32 @@ service Search { }; } - // A method to linear search indexed vectors by ID. + // Overview + // LinearSearchByID RPC is the method to linear search similar vectors using a user-defined vector ID.
+ // The vector with the same requested ID should be indexed into the `vald-agent` before searching. + // You will get a `NOT_FOUND` error if the vector isn't stored. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc LinearSearchByID(payload.v1.Search.IDRequest) returns (payload.v1.Search.Response) { option (google.api.http) = { post: "/linearsearch/id" @@ -82,14 +284,91 @@ service Search { }; } - // A method to linear search indexed vectors by multiple vectors. + // Overview + // StreamLinearSearch RPC is the method to linear search vectors with multi queries(vectors) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the linear search request can be communicated in any order between the client and server. + // Each LinearSearch request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamLinearSearch(stream payload.v1.Search.Request) returns (stream payload.v1.Search.StreamResponse) {} - // A method to linear search indexed vectors by multiple IDs. + // Overview + // StreamLinearSearchByID RPC is the method to linear search vectors with multi queries(IDs) using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the linear search request can be communicated in any order between the client and server. + // Each LinearSearchByID request and response are independent. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamLinearSearchByID(stream payload.v1.Search.IDRequest) returns (stream payload.v1.Search.StreamResponse) {} - // A method to linear search indexed vectors by multiple vectors in a single - // request. + // Overview + // MultiLinearSearch RPC is the method to linear search vectors with multiple vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
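All of the Stream* RPCs above share the same client-side shape: requests are sent and responses received independently over one bidirectional stream. A hedged Go sketch for StreamLinearSearch; the generated method and stream type names are assumed from protoc-gen-go-grpc output, and the oneof inside each StreamResponse is left unhandled for brevity.

```go
// Hypothetical sketch of the bidirectional streaming pattern shared by the
// Stream* RPCs, shown here for StreamLinearSearch. Generated method names
// are assumed from protoc-gen-go-grpc output.
package example

import (
	"context"
	"io"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func streamLinearSearch(ctx context.Context, c vald.SearchClient, vecs [][]float32) error {
	stream, err := c.StreamLinearSearch(ctx)
	if err != nil {
		return err
	}
	// Receive responses concurrently; their order is independent of the send order.
	done := make(chan error, 1)
	go func() {
		for {
			if _, err := stream.Recv(); err != nil {
				if err == io.EOF {
					done <- nil
				} else {
					done <- err
				}
				return
			}
			// Each StreamResponse carries either a Search.Response or a per-request
			// status in a oneof; handling of that payload is omitted here.
		}
	}()
	for _, v := range vecs {
		if err := stream.Send(&payload.Search_Request{Vector: v}); err != nil {
			return err
		}
	}
	if err := stream.CloseSend(); err != nil {
		return err
	}
	return <-done
}
```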
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Search result is empty or insufficient to request result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiLinearSearch(payload.v1.Search.MultiRequest) returns (payload.v1.Search.Responses) { option (google.api.http) = { post: "/linearsearch/multiple" @@ -97,8 +376,35 @@ service Search { }; } - // A method to linear search indexed vectors by multiple IDs in a single - // request. + // Overview + // MultiLinearSearchByID RPC is the method to linear search vectors with multiple IDs in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
+ // // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Requested vector's ID is empty, or some request payload is invalid. | Check request payload and fix request payload. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | The Requested ID is not inserted on the target Vald cluster, or the search result is insufficient to the required result length. | Send a request with another vector or set min_num to a smaller value. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiLinearSearchByID(payload.v1.Search.MultiIDRequest) returns (payload.v1.Search.Responses) { option (google.api.http) = { post: "/linearsearch/id/multiple" diff --git a/apis/proto/v1/vald/update.proto b/apis/proto/v1/vald/update.proto index e6f0728f84..94253ab534 100644 --- a/apis/proto/v1/vald/update.proto +++ b/apis/proto/v1/vald/update.proto @@ -26,9 +26,35 @@ option java_multiple_files = true; option java_outer_classname = "ValdUpdate"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Update service provides ways to update indexed vectors. +// Overview +// Update Service updates to new vector from inserted vector in the `vald-agent` components. service Update { - // A method to update an indexed vector. + // Overview + // Update RPC is the method to update a single vector. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. 
| Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Update(payload.v1.Update.Request) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/update" @@ -36,14 +62,87 @@ service Update { }; } - // A method to update multiple indexed vectors by bidirectional streaming. + // Overview + // StreamUpdate RPC is the method to update multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the update request can be communicated in any order between client and server. + // Each Update request and response are independent. + // It's the recommended method to update the large amount of vectors. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamUpdate(stream payload.v1.Update.Request) returns (stream payload.v1.Object.StreamLocation) {} - // A method to update multiple indexed vectors in a single request. + // Overview + // MultiUpdate is the method to update multiple vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
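The troubleshooting tables throughout these services key off gRPC status codes. On the Go side, those codes can be recovered from the returned error with the standard status package; a sketch for a single Update call (only the `vald.UpdateClient` interface name is assumed from the generated code):

```go
// Hypothetical sketch: map an Update error back to the status codes listed
// in the troubleshooting tables above, using the standard grpc-go status API.
package example

import (
	"context"
	"log"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func update(ctx context.Context, c vald.UpdateClient, req *payload.Update_Request) {
	if _, err := c.Update(ctx, req); err != nil {
		switch status.Code(err) {
		case codes.NotFound:
			log.Println("ID is not indexed yet; insert it before updating")
		case codes.AlreadyExists:
			log.Println("the same ID/vector pair is already indexed; nothing to update")
		case codes.InvalidArgument:
			log.Println("check vector dimension, ID, and payload against the Agent config")
		case codes.DeadlineExceeded:
			log.Println("increase the client/server gRPC timeout")
		default:
			log.Printf("update failed: %v", err)
		}
	}
}
```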
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | NOT_FOUND | Requested ID is NOT inserted. | Send a request with an ID that is already inserted. | + // | ALREADY_EXISTS | Request pair of ID and vector is already inserted. | Change request ID. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiUpdate(payload.v1.Update.MultiRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/update/multiple" body: "*" }; } + + // Overview + // A method to update timestamp an indexed vector. + // --- + // Status Code + // TODO + // --- + // Troubleshooting + // TODO + rpc UpdateTimestamp(payload.v1.Update.TimestampRequest) returns (payload.v1.Object.Location) { + option (google.api.http) = { + post: "/update/timestamp" + body: "*" + }; + } } diff --git a/apis/proto/v1/vald/upsert.proto b/apis/proto/v1/vald/upsert.proto index 44604698f9..9a2177f0d7 100644 --- a/apis/proto/v1/vald/upsert.proto +++ b/apis/proto/v1/vald/upsert.proto @@ -26,9 +26,34 @@ option java_multiple_files = true; option java_outer_classname = "ValdUpsert"; option java_package = "org.vdaas.vald.api.v1.vald"; -// Upsert service provides ways to insert/update vectors. +// Overview +// Upsert Service is responsible for updating existing vectors in the `vald-agent` or inserting new vectors into the `vald-agent` if the vector does not exist. service Upsert { - // A method to insert/update a vector. + // Overview + // Upsert RPC is the method to update the inserted vector to a new single vector or add a new single vector if not inserted before. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. 
+ // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc Upsert(payload.v1.Upsert.Request) returns (payload.v1.Object.Location) { option (google.api.http) = { post: "/upsert" @@ -36,10 +61,66 @@ service Upsert { }; } - // A method to insert/update multiple vectors by bidirectional streaming. + // Overview + // StreamUpsert RPC is the method to update multiple existing vectors or add new multiple vectors using the [bidirectional streaming RPC](https://grpc.io/docs/what-is-grpc/core-concepts/#bidirectional-streaming-rpc).
+ // Using the bidirectional streaming RPC, the upsert request can be communicated in any order between the client and server. + // Each Upsert request and response are independent. + // It’s the recommended method to upsert a large number of vectors. + // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc StreamUpsert(stream payload.v1.Upsert.Request) returns (stream payload.v1.Object.StreamLocation) {} - // A method to insert/update multiple vectors in a single request. + // Overview + // MultiUpsert is the method to update existing multiple vectors and add new multiple vectors in **1** request. + // + //
+ // gRPC has a message size limitation.
+ // Please be careful that the size of the request does not exceed the limit. + //
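For completeness, a hedged sketch of a single Upsert call matching the description above: the vector is inserted if the ID is new and updated otherwise. Type names are assumed from the generated payload/vald packages.

```go
// Hypothetical sketch: upsert one vector by ID (insert if absent, update otherwise).
// Type and field names assume the generated Go client for these protos.
package example

import (
	"context"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
)

func upsertOne(ctx context.Context, c vald.UpsertClient, id string, vec []float32) (*payload.Object_Location, error) {
	return c.Upsert(ctx, &payload.Upsert_Request{
		Vector: &payload.Object_Vector{
			Id:     id,
			Vector: vec,
		},
	})
}
```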
+ // --- + // Status Code + // | 0 | OK | + // | 1 | CANCELLED | + // | 3 | INVALID_ARGUMENT | + // | 4 | DEADLINE_EXCEEDED | + // | 5 | NOT_FOUND | + // | 6 | ALREADY_EXISTS | + // | 10 | ABORTED | + // | 13 | INTERNAL | + // --- + // Troubleshooting + // The request process may not be completed when the response code is NOT `0 (OK)`. + // + // Here are some common reasons and how to resolve each error. + // + // | name | common reason | how to resolve | + // | :---------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------- | :--------------------------------------------------------------------------------------- | + // | CANCELLED | Executed cancel() of rpc from client/server-side or network problems between client and server. | Check the code, especially around timeout and connection management, and fix if needed. | + // | INVALID_ARGUMENT | The Dimension of the request vector is NOT the same as Vald Agent's config, the requested vector's ID is empty, or some request payload is invalid. | Check Agent config, request payload, and fix request payload or Agent config. | + // | DEADLINE_EXCEEDED | The RPC timeout setting is too short on the client/server side. | Check the gRPC timeout setting on both the client and server sides and fix it if needed. | + // | ALREADY_EXISTS | Requested pair of ID and vector is already inserted | Change request payload or nothing to do if update is unnecessary. | + // | INTERNAL | Target Vald cluster or network route has some critical error. | Check target Vald cluster first and check network route including ingress as second. | rpc MultiUpsert(payload.v1.Upsert.MultiRequest) returns (payload.v1.Object.Locations) { option (google.api.http) = { post: "/upsert/multiple" diff --git a/apis/swagger/v1/vald/update.swagger.json b/apis/swagger/v1/vald/update.swagger.json index d295febc19..6e6c883681 100644 --- a/apis/swagger/v1/vald/update.swagger.json +++ b/apis/swagger/v1/vald/update.swagger.json @@ -75,6 +75,38 @@ ], "tags": ["Update"] } + }, + "/update/timestamp": { + "post": { + "summary": "A method to update timestamp an indexed vector.", + "operationId": "Update_UpdateTimestamp", + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/v1ObjectLocation" + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "$ref": "#/definitions/rpcStatus" + } + } + }, + "parameters": [ + { + "name": "body", + "description": "Represent a vector meta data.", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/v1UpdateTimestampRequest" + } + } + ], + "tags": ["Update"] + } } }, "definitions": { @@ -262,6 +294,25 @@ } }, "description": "Represent the update request." + }, + "v1UpdateTimestampRequest": { + "type": "object", + "properties": { + "id": { + "type": "string", + "description": "The vector ID." + }, + "timestamp": { + "type": "string", + "format": "int64", + "description": "timestamp represents when this vector inserted." + }, + "force": { + "type": "boolean", + "description": "force represents forcefully update the timestamp." + } + }, + "description": "Represent a vector meta data." } } } diff --git a/buf.gen.yaml b/buf.gen.yaml index 290395f4fd..7f3834ed1e 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -13,38 +13,35 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -version: v1 +version: v2 managed: - enabled: false - go_package_prefix: - default: github.com/vdaas/vald/apis/grpc - except: - - buf.build/googleapis/googleapis + disable: + - file_option: go_package + module: buf.build/googleapis/googleapis + override: + - file_option: go_package_prefix + value: github.com/vdaas/vald/apis/grpc plugins: - - plugin: buf.build/protocolbuffers/go + - remote: buf.build/protocolbuffers/go out: apis/grpc - opt: - - paths=source_relative - - plugin: buf.build/community/planetscale-vtprotobuf + opt: paths=source_relative + - remote: buf.build/community/planetscale-vtprotobuf out: apis/grpc opt: - paths=source_relative - features=grpc+marshal+unmarshal+size+equal+clone - - plugin: buf.build/community/mfridman-go-json + - remote: buf.build/community/mfridman-go-json out: apis/grpc - opt: - - paths=source_relative - - plugin: buf.build/community/pseudomuto-doc + opt: paths=source_relative + - remote: buf.build/community/pseudomuto-doc out: apis/docs/v1 - opt: - - markdown,docs.md - - plugin: buf.build/grpc-ecosystem/openapiv2 + opt: markdown,docs.md + - remote: buf.build/grpc-ecosystem/openapiv2 out: apis/swagger - opt: - - json_names_for_fields=true - - plugin: buf.build/community/neoeinstein-prost + opt: json_names_for_fields=true + - remote: buf.build/community/neoeinstein-prost out: rust/libs/proto/src - - plugin: buf.build/community/neoeinstein-tonic + opt: enable_type_names + - remote: buf.build/community/neoeinstein-tonic out: rust/libs/proto/src - opt: - - no_include + opt: no_include diff --git a/buf.lock b/buf.lock new file mode 100644 index 0000000000..fe0ef317bb --- /dev/null +++ b/buf.lock @@ -0,0 +1,9 @@ +# Generated by buf. DO NOT EDIT. +version: v2 +deps: + - name: buf.build/bufbuild/protovalidate + commit: 1baebb0a15184714854fa1ddfd22a29b + digest: b5:6ee11b05d5f54b6257c79c3141eb1050723c4433cf490259ba1d8c02cd08c3e325625527b00fec4dc8b338901f1f3c567ef7ff0698b1c92d09b2dfa6f516c1e6 + - name: buf.build/googleapis/googleapis + commit: 28151c0d0a1641bf938a7672c500e01d + digest: b5:93b70089baa4fc05a92d3e52db91a4b7812db3b57b9664f6cb301733938cb630e377a938e8a56779388171c749c1d42a2e9a6c6230f2ff45f127a8102a6a27d0 diff --git a/charts/vald-benchmark-operator/Chart.yaml b/charts/vald-benchmark-operator/Chart.yaml index ab480c8ff5..5b91331a10 100644 --- a/charts/vald-benchmark-operator/Chart.yaml +++ b/charts/vald-benchmark-operator/Chart.yaml @@ -32,7 +32,7 @@ appVersion: "1.16.0" apiVersion: v2 name: vald-benchmark-operator -version: v1.7.13 +version: v1.7.16 description: A benchmark operator for benchmarking the Vald cluster. type: application keywords: diff --git a/charts/vald-benchmark-operator/README.md b/charts/vald-benchmark-operator/README.md index a5117ebca4..d2f4ad988f 100644 --- a/charts/vald-benchmark-operator/README.md +++ b/charts/vald-benchmark-operator/README.md @@ -2,7 +2,7 @@ This is a Helm chart to install `vald-benchmark-operator`. 
-Current chart version is `v1.7.13` +Current chart version is `v1.7.16` ## Table of Contents @@ -28,7 +28,7 @@ Run the following command to install the chart, | env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"JOB_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | | image.pullPolicy | string | `"Always"` | image pull policy | | image.repository | string | `"vdaas/vald-benchmark-operator"` | job image repository | -| image.tag | string | `"v1.7.13"` | image tag for job docker image | +| image.tag | string | `"v1.7.16"` | image tag for job docker image | | job.client_config.addrs | list | `[]` | gRPC client addresses | | job.client_config.backoff.backoff_factor | float | `1.1` | gRPC client backoff factor | | job.client_config.backoff.backoff_time_limit | string | `"5s"` | gRPC client backoff time limit | @@ -96,7 +96,7 @@ Run the following command to install the chart, | job.client_config.tls.key | string | `"/path/to/key"` | TLS key path | | job.image.pullPolicy | string | `"Always"` | | | job.image.repository | string | `"vdaas/vald-benchmark-job"` | | -| job.image.tag | string | `"v1.7.13"` | | +| job.image.tag | string | `"v1.7.16"` | | | logging.format | string | `"raw"` | logging format. logging format must be `raw` or `json` | | logging.level | string | `"debug"` | logging level. logging level must be `debug`, `info`, `warn`, `error` or `fatal`. | | logging.logger | string | `"glg"` | logger name. currently logger must be `glg` or `zap`. | @@ -230,7 +230,7 @@ Run the following command to install the chart, | server_config.servers.grpc.server.probe_wait_time | string | `"3s"` | | | server_config.servers.grpc.server.restart | bool | `true` | | | server_config.servers.grpc.server.socket_path | string | `""` | | -| server_config.servers.grpc.serviecPort | int | `8081` | | +| server_config.servers.grpc.servicePort | int | `8081` | | | server_config.servers.rest.enabled | bool | `false` | | | server_config.tls.ca | string | `"/path/to/ca"` | | | server_config.tls.cert | string | `"/path/to/cert"` | | diff --git a/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml b/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml index 7c0037a53b..2bc9d474d9 100644 --- a/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml +++ b/charts/vald-benchmark-operator/crds/valdbenchmarkjob.yaml @@ -64,17 +64,14 @@ spec: default: Available type: string spec: - type: object properties: client_config: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -90,11 +87,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -106,8 +103,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -119,8 +116,8 @@ spec: type: string size: type: integer - dial_option: type: object + dial_option: properties: backoff_base_delay: type: string @@ -139,13 +136,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string 
+ type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -153,15 +150,14 @@ spec: type: string timeout: type: string + type: object max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -169,8 +165,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -178,8 +174,8 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -199,8 +195,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -212,12 +208,15 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer timeout: type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -227,7 +226,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -239,38 +237,39 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object concurrency_limit: - type: integer maximum: 65535 minimum: 0 + type: integer dataset: - type: object properties: group: - type: string minLength: 1 + type: string indexes: - type: integer minimum: 0 + type: integer name: - type: string enum: - original - fashion-mnist + type: string range: - type: object properties: end: - type: integer minimum: 1 - start: type: integer + start: minimum: 1 + type: integer required: - start - end + type: object url: type: string required: @@ -278,43 +277,43 @@ spec: - indexes - group - range - global_config: type: object + global_config: properties: logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object time_zone: type: string version: type: string - insert_config: type: object + insert_config: properties: skip_strict_exist_check: type: boolean timestamp: type: string + type: object job_type: - type: string enum: - insert - update @@ -323,46 +322,46 @@ spec: - remove - getobject - exists + type: string object_config: - type: object properties: filter_config: - type: object properties: host: type: string - remove_config: + type: object type: object + remove_config: properties: skip_strict_exist_check: type: boolean timestamp: type: string + type: object repetition: - type: integer minimum: 1 - replica: type: integer + replica: minimum: 1 - rps: type: integer + rps: maximum: 65535 minimum: 0 + type: integer rules: - type: array items: type: string + type: array search_config: - type: object properties: aggregation_algorithm: - type: string enum: - Unknown - ConcurrentQueue - SortSlice - SortPoolSlice - PairingHeap + type: string enable_linear_search: type: boolean epsilon: @@ -375,26 +374,22 @@ spec: type: number timeout: type: string - server_config: type: object + server_config: properties: healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -402,6 +397,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -410,15 +406,14 
@@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -432,10 +427,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -446,10 +441,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -469,30 +464,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -500,6 +495,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -508,11 +504,10 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string @@ -526,10 +521,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -540,10 +535,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -563,28 +558,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -592,6 +587,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -600,25 +596,24 @@ spec: type: integer timeoutSeconds: type: integer - servers: + type: object + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -633,16 +628,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -658,6 +652,7 @@ spec: type: string timeout: type: string + type: object max_header_list_size: type: integer max_receive_message_size: @@ -668,10 +663,10 @@ spec: type: integer write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -682,12 +677,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -707,28 +702,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + 
type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -742,10 +737,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -756,10 +751,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -779,31 +774,35 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - target: + type: integer + type: object + type: object type: object + target: properties: host: - type: string minLength: 1 + type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer required: - host - port + type: object ttl_seconds_after_finished: - type: integer maximum: 65535 minimum: 0 + type: integer update_config: - type: object properties: disable_balance_update: type: boolean @@ -811,8 +810,8 @@ spec: type: boolean timestamp: type: string - upsert_config: type: object + upsert_config: properties: disable_balance_update: type: boolean @@ -820,3 +819,5 @@ spec: type: boolean timestamp: type: string + type: object + type: object diff --git a/charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml b/charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml index e6baaa00a8..1ede034eb9 100644 --- a/charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml +++ b/charts/vald-benchmark-operator/crds/valdbenchmarkoperatorrelease.yaml @@ -60,7 +60,6 @@ spec: - Healthy type: string spec: - type: object properties: affinity: type: object @@ -69,35 +68,32 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - job: type: object + job: properties: client_config: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -113,11 +109,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -129,8 +125,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -142,8 +138,8 @@ spec: type: string size: type: integer - dial_option: type: object + dial_option: properties: backoff_base_delay: type: string @@ -162,13 +158,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -176,15 +172,14 @@ spec: type: string timeout: type: string + type: object max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: 
dual_stack_enabled: type: boolean @@ -192,8 +187,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -201,8 +196,8 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -222,8 +217,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -235,12 +230,15 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer timeout: type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -250,7 +248,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -262,60 +259,59 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - logging: + type: object type: object + logging: properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string nodeSelector: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean otlp: - type: object properties: attribute: - type: object properties: metrics: - type: object properties: enable_cgo: type: boolean @@ -326,9 +322,10 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: type: string + type: array + type: object namespace: type: string node_name: @@ -337,6 +334,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -351,13 +349,15 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean sampling_rate: type: integer + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true @@ -365,16 +365,15 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true rbac: - type: object properties: create: type: boolean name: type: string + type: object replicas: type: integer resources: - type: object properties: limits: type: object @@ -382,31 +381,27 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -414,6 +409,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -422,13 +418,12 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: type: integer server: - type: object properties: http: - type: object properties: idle_timeout: type: string @@ -442,6 +437,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -450,10 +446,11 @@ spec: type: string socket_path: type: string + type: object servicePort: type: integer - readiness: type: 
object + readiness: properties: enabled: type: boolean @@ -462,12 +459,10 @@ spec: port: type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -475,6 +470,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -483,11 +479,10 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string @@ -501,6 +496,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -509,20 +505,20 @@ spec: type: string socket_path: type: string + type: object servicePort: type: integer - startup: type: object + startup: properties: enabled: type: boolean - startupProbe: type: object + startupProbe: properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -530,6 +526,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -538,11 +535,11 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean @@ -551,10 +548,8 @@ spec: port: type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -568,6 +563,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -576,11 +572,12 @@ spec: type: string socket_path: type: string - servers: + type: object + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean @@ -591,10 +588,8 @@ spec: port: type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -609,11 +604,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: + enum: + - RecoverInterceptor + - AccessLogInterceptor + - TraceInterceptor + - MetricInterceptor type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -629,6 +628,7 @@ spec: type: string timeout: type: string + type: object max_header_list_size: type: integer max_receive_message_size: @@ -639,6 +639,7 @@ spec: type: integer write_buffer_size: type: integer + type: object mode: type: string network: @@ -649,15 +650,17 @@ spec: type: boolean socket_path: type: string + type: object servicePort: type: integer - rest: type: object + rest: properties: enabled: type: boolean - tls: + type: object type: object + tls: properties: ca: type: string @@ -669,8 +672,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -683,24 +687,26 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort - serviceAccount: + type: string type: object + serviceAccount: properties: create: type: boolean name: type: string + type: object time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array version: type: string + type: object diff --git a/charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml b/charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml index 5c0c6ccbe1..eab6b08774 100644 --- a/charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml +++ 
b/charts/vald-benchmark-operator/crds/valdbenchmarkscenario.yaml @@ -61,34 +61,32 @@ spec: default: Available type: string spec: - type: object properties: dataset: - type: object properties: group: - type: string minLength: 1 + type: string indexes: - type: integer minimum: 0 + type: integer name: - type: string enum: - original - fashion-mnist + type: string range: - type: object properties: end: - type: integer minimum: 1 - start: type: integer + start: minimum: 1 + type: integer required: - start - end + type: object url: type: string required: @@ -96,21 +94,23 @@ spec: - indexes - group - range + type: object jobs: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array target: - type: object properties: host: - type: string minLength: 1 + type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer required: - host - port + type: object + type: object diff --git a/charts/vald-benchmark-operator/schemas/job-values.schema.json b/charts/vald-benchmark-operator/schemas/job-values.schema.json index 4c1ea57433..88b0e53942 100644 --- a/charts/vald-benchmark-operator/schemas/job-values.schema.json +++ b/charts/vald-benchmark-operator/schemas/job-values.schema.json @@ -134,7 +134,10 @@ "interceptors": { "type": "array", "description": "gRPC client interceptors", - "items": { "type": "string", "enum": ["TraceInterceptor"] } + "items": { + "type": "string", + "enum": ["TraceInterceptor", "MetricInterceptor"] + } }, "keepalive": { "type": "object", diff --git a/charts/vald-benchmark-operator/schemas/job-values.yaml b/charts/vald-benchmark-operator/schemas/job-values.yaml index 3ad09f090a..0d65926a02 100644 --- a/charts/vald-benchmark-operator/schemas/job-values.yaml +++ b/charts/vald-benchmark-operator/schemas/job-values.yaml @@ -267,7 +267,7 @@ client_config: # @schema {"name": "client_config.dial_option.timeout", "type": "string"} # client_config.dial_option.timeout -- gRPC client dial option timeout timeout: "" - # @schema {"name": "client_config.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor"]}} + # @schema {"name": "client_config.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor", "MetricInterceptor"]}} # client_config.dial_option.interceptors -- gRPC client interceptors interceptors: [] # @schema {"name": "client_config.dial_option.net", "type": "object", "anchor": "net"} @@ -800,7 +800,7 @@ server_config: reuse_port: true # server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true diff --git a/charts/vald-benchmark-operator/templates/deployment.yaml b/charts/vald-benchmark-operator/templates/deployment.yaml index f7da1ecd43..e48ba173a6 100644 --- a/charts/vald-benchmark-operator/templates/deployment.yaml +++ b/charts/vald-benchmark-operator/templates/deployment.yaml @@ -47,7 +47,7 @@ spec: {{- if .Values.podAnnotations }} {{- toYaml . 
| nindent 8 }} {{- end }} - {{- if .Values.server_config.metrics.pprof.enabeld }} + {{- if .Values.server_config.metrics.pprof.enabled }} pyroscope.io/scrape: "true" pyroscope.io/application-name: {{ .Values.name }} pyroscope.io/profile-cpu-enabled: "true" diff --git a/charts/vald-benchmark-operator/values.schema.json b/charts/vald-benchmark-operator/values.schema.json index fc1ebcd1a6..6a4e98f4ba 100644 --- a/charts/vald-benchmark-operator/values.schema.json +++ b/charts/vald-benchmark-operator/values.schema.json @@ -164,7 +164,10 @@ "interceptors": { "type": "array", "description": "gRPC client interceptors", - "items": { "type": "string", "enum": ["TraceInterceptor"] } + "items": { + "type": "string", + "enum": ["TraceInterceptor", "MetricInterceptor"] + } }, "keepalive": { "type": "object", @@ -698,7 +701,15 @@ "initial_window_size": { "type": "integer" }, "interceptors": { "type": "array", - "items": { "type": "string" } + "items": { + "type": "string", + "enum": [ + "RecoverInterceptor", + "AccessLogInterceptor", + "TraceInterceptor", + "MetricInterceptor" + ] + } }, "keepalive": { "type": "object", diff --git a/charts/vald-benchmark-operator/values.yaml b/charts/vald-benchmark-operator/values.yaml index 288a95a1c8..9090d6bb89 100644 --- a/charts/vald-benchmark-operator/values.yaml +++ b/charts/vald-benchmark-operator/values.yaml @@ -33,7 +33,7 @@ image: repository: vdaas/vald-benchmark-operator # @schema {"name": "image.tag", "type": "string"} # image.tag -- image tag - tag: v1.7.13 + tag: v1.7.16 # @schema {"name": "image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} # image.pullPolicy -- image pull policy pullPolicy: Always @@ -46,7 +46,7 @@ job: repository: vdaas/vald-benchmark-job # @schema {"name": "job.image.tag", "type": "string"} # image.tag -- image tag for job docker image - tag: v1.7.13 + tag: v1.7.16 # @schema {"name": "job.image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} # image.pullPolicy -- image pull policy pullPolicy: Always @@ -171,7 +171,7 @@ job: # @schema {"name": "job.client_config.dial_option.timeout", "type": "string"} # job.client_config.dial_option.timeout -- gRPC client dial option timeout timeout: "" - # @schema {"name": "job.client_config.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor"]}} + # @schema {"name": "job.client_config.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor", "MetricInterceptor"]}} # job.client_config.dial_option.interceptors -- gRPC client interceptors interceptors: [] # @schema {"name": "job.client_config.dial_option.net", "type": "object", "anchor": "net"} @@ -384,7 +384,7 @@ server_config: # @schema {"name": "server_config.servers.grpc.port", "type": "integer"} port: 8081 # @schema {"name": "server_config.servers.grpc.servicePort", "type": "integer"} - serviecPort: 8081 + servicePort: 8081 # @schema {"name": "server_config.servers.grpc.server", "type": "object"} server: # @schema {"name": "server_config.servers.grpc.server.mode", "type": "string"} @@ -407,7 +407,7 @@ server_config: initial_conn_window_size: 0 # @schema {"name": "server_config.servers.grpc.server.grpc.initial_window_size", "type": "integer"} initial_window_size: 0 - # @schema {"name": "server_config.servers.grpc.server.grpc.interceptors", "type": "array", "items": {"type": "string"}} + # @schema {"name": "server_config.servers.grpc.server.grpc.interceptors", "type": "array", "items": {"type": "string", 
"enum": ["RecoverInterceptor", "AccessLogInterceptor", "TraceInterceptor", "MetricInterceptor"]}} interceptors: [] # @schema {"name": "server_config.servers.grpc.server.grpc.keepalive", "type": "object"} keepalive: diff --git a/charts/vald-helm-operator/Chart.yaml b/charts/vald-helm-operator/Chart.yaml index 074dbc5958..0153faaa85 100644 --- a/charts/vald-helm-operator/Chart.yaml +++ b/charts/vald-helm-operator/Chart.yaml @@ -16,7 +16,7 @@ apiVersion: v2 name: vald-helm-operator -version: v1.7.13 +version: v1.7.16 description: A Helm chart for vald-helm-operator type: application keywords: diff --git a/charts/vald-helm-operator/README.md b/charts/vald-helm-operator/README.md index da09fabeed..e2a964b4fc 100644 --- a/charts/vald-helm-operator/README.md +++ b/charts/vald-helm-operator/README.md @@ -2,7 +2,7 @@ This is a Helm chart to install vald-helm-operator. -Current chart version is `v1.7.13` +Current chart version is `v1.7.16` ## Table of Contents @@ -26,13 +26,13 @@ Run the following command to install the chart, Please upgrade the CRDs first because Helm doesn't have a support to upgrade CRDs. - $ kubectl replace -f https://raw.githubusercontent.com/vdaas/vald/v1.7.13/charts/vald-helm-operator/crds/valdrelease.yaml - $ kubectl replace -f https://raw.githubusercontent.com/vdaas/vald/v1.7.13/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml + $ kubectl replace -f https://raw.githubusercontent.com/vdaas/vald/v1.7.16/charts/vald-helm-operator/crds/valdrelease.yaml + $ kubectl replace -f https://raw.githubusercontent.com/vdaas/vald/v1.7.16/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml After upgrading CRDs, you can upgrade the operator. If you're using `valdhelmoperatorrelease` (or `vhor`) resource, please update the `spec.image.tag` field of it. - $ kubectl patch vhor vhor-release -p '{"spec":{"image":{"tag":"v1.7.13"}}}' + $ kubectl patch vhor vhor-release -p '{"spec":{"image":{"tag":"v1.7.16"}}}' On the other hand, please update the operator's deployment manually. @@ -79,7 +79,7 @@ spec: {} | healthPort | int | `8081` | port of health endpoint | | image.pullPolicy | string | `"Always"` | image pull policy | | image.repository | string | `"vdaas/vald-helm-operator"` | image repository | -| image.tag | string | `"v1.7.13"` | image tag | +| image.tag | string | `"v1.7.16"` | image tag | | leaderElectionID | string | `"vald-helm-operator"` | name of the configmap that is used for holding the leader lock. | | livenessProbe.enabled | bool | `true` | enable liveness probe. | | livenessProbe.failureThreshold | int | `2` | liveness probe failure threshold | diff --git a/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml b/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml index f94a4eeaa9..f4ecaef4cf 100644 --- a/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml +++ b/charts/vald-helm-operator/crds/valdhelmoperatorrelease.yaml @@ -62,7 +62,6 @@ spec: x-kubernetes-preserve-unknown-fields: true # schema of spec must be generated by hack/helm/schema/crd/main.go. 
spec: - type: object properties: affinity: type: object @@ -77,29 +76,27 @@ spec: healthPort: type: integer image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object leaderElectionID: type: string livenessProbe: - type: object properties: enabled: type: boolean failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -107,6 +104,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -115,26 +113,27 @@ spec: type: integer timeoutSeconds: type: integer - logging: type: object + logging: properties: format: - type: string enum: - console - json - level: type: string + level: enum: - debug - info - error - stacktraceLevel: type: string + stacktraceLevel: enum: - debug - info - error + type: string + type: object maxConcurrentReconciles: type: integer metricsPort: @@ -153,21 +152,19 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true rbac: - type: object properties: create: type: boolean name: type: string - readinessProbe: type: object + readinessProbe: properties: enabled: type: boolean failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -175,6 +172,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -183,6 +181,7 @@ spec: type: integer timeoutSeconds: type: integer + type: object reconcilePeriod: type: string replicas: @@ -194,7 +193,6 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true service: - type: object properties: annotations: type: object @@ -207,22 +205,24 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort - serviceAccount: + type: string type: object + serviceAccount: properties: create: type: boolean name: type: string + type: object tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array watchNamespaces: type: string + type: object diff --git a/charts/vald-helm-operator/crds/valdrelease.yaml b/charts/vald-helm-operator/crds/valdrelease.yaml index 07a12af69c..58b6811818 100644 --- a/charts/vald-helm-operator/crds/valdrelease.yaml +++ b/charts/vald-helm-operator/crds/valdrelease.yaml @@ -66,89 +66,86 @@ spec: x-kubernetes-preserve-unknown-fields: true # schema of spec must be generated by hack/helm/schema/crd/main.go. 
spec: - type: object properties: agent: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object algorithm: - type: string enum: - ngt - faiss + type: string annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string faiss: - type: object properties: auto_index_check_duration: type: string @@ -159,8 +156,8 @@ spec: auto_save_index_duration: type: string dimension: - type: integer minimum: 1 + type: integer enable_copy_on_write: type: boolean enable_in_memory_mode: @@ -172,10 +169,10 @@ spec: initial_delay_max_duration: type: string kvsdb: - type: object properties: concurrency: type: integer + type: object load_index_timeout_factor: type: string m: @@ -183,15 +180,15 @@ spec: max_load_index_timeout: type: string method_type: - type: string enum: - ivfpq - binaryindex - metric_type: type: string + metric_type: enum: - innerproduct - l2 + type: string min_load_index_timeout: type: string namespace: @@ -203,76 +200,76 @@ spec: pod_name: type: string vqueue: - type: object properties: delete_buffer_pool_size: type: integer insert_buffer_pool_size: type: integer - hpa: + type: object type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - StatefulSet - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string ngt: - type: object properties: 
auto_create_index_pool_size: type: integer @@ -285,8 +282,8 @@ spec: auto_save_index_duration: type: string broken_index_history_limit: - type: integer minimum: 0 + type: integer bulk_insert_chunk_size: type: integer creation_edge_size: @@ -298,10 +295,9 @@ spec: default_radius: type: number dimension: - type: integer minimum: 1 + type: integer distance_type: - type: string enum: - l1 - l2 @@ -329,6 +325,7 @@ spec: - innerproduct - dp - ip + type: string enable_copy_on_write: type: boolean enable_export_index_info_to_k8s: @@ -340,8 +337,8 @@ spec: enable_statistics: type: boolean error_buffer_limit: - type: integer minimum: 1 + type: integer export_index_info_duration: type: string index_path: @@ -349,10 +346,10 @@ spec: initial_delay_max_duration: type: string kvsdb: - type: object properties: concurrency: type: integer + type: object load_index_timeout_factor: type: string max_load_index_timeout: @@ -362,34 +359,33 @@ spec: namespace: type: string object_type: - type: string enum: - float - float16 - uint8 + type: string pod_name: type: string search_edge_size: type: integer vqueue: - type: object properties: delete_buffer_pool_size: type: integer insert_buffer_pool_size: type: integer + type: object + type: object nodeName: type: string nodeSelector: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -400,9 +396,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -414,11 +408,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -428,6 +423,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -442,13 +438,14 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - persistentVolume: + type: object type: object + persistentVolume: properties: accessMode: type: string @@ -460,62 +457,62 @@ spec: type: string storageClass: type: string + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podManagementPolicy: - type: string enum: - OrderedReady - Parallel + type: string podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer readreplica: - type: object properties: component_name: type: string enabled: type: boolean hpa: - type: object properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer + type: object label_key: type: string maxReplicas: - type: integer minimum: 1 - minReplicas: type: integer + minReplicas: minimum: 1 + type: integer name: type: string service: - type: object properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true + type: object snapshot_classname: type: string volume_name: type: string - resources: type: object + resources: properties: limits: type: object @@ -523,11 +520,11 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string @@ -535,31 +532,27 @@ spec: type: 
string partition: type: integer + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -567,6 +560,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -575,20 +569,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -608,6 +600,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -618,10 +611,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -632,12 +625,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -657,30 +650,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -688,6 +681,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -696,16 +690,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -725,6 +717,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -735,10 +728,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -749,12 +742,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -774,28 +767,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -803,6 +796,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -811,30 +805,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - 
type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -854,6 +846,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -864,10 +857,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -878,12 +871,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -903,31 +896,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -935,6 +928,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -944,16 +939,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -969,20 +963,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -993,12 +996,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1018,33 +1021,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1064,6 +1066,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1074,10 +1077,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1088,12 +1091,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1113,14 +1116,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: 
integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -1132,8 +1138,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -1141,44 +1148,41 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string sidecar: - type: object properties: config: - type: object properties: auto_backup_duration: type: string auto_backup_enabled: type: boolean blob_storage: - type: object properties: bucket: type: string cloud_storage: - type: object properties: client: - type: object properties: credentials_file_path: type: string credentials_json: type: string + type: object url: type: string write_buffer_size: @@ -1193,8 +1197,8 @@ spec: type: string write_content_type: type: string - s3: type: object + s3: properties: access_key: type: string @@ -1215,11 +1219,11 @@ spec: force_path_style: type: boolean max_chunk_size: - type: string pattern: ^[0-9]+(kb|mb|gb)$ - max_part_size: type: string + max_part_size: pattern: ^[0-9]+(kb|mb|gb)$ + type: string max_retries: type: integer region: @@ -1234,19 +1238,18 @@ spec: type: boolean use_dual_stack: type: boolean + type: object storage_type: - type: string enum: - s3 - cloud_storage - client: + type: string type: object + client: properties: net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -1254,8 +1257,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -1263,8 +1266,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -1284,8 +1293,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -1297,11 +1306,11 @@ spec: type: boolean key: type: string - transport: + type: object type: object + transport: properties: backoff: - type: object properties: backoff_factor: type: number @@ -1317,8 +1326,8 @@ spec: type: string retry_count: type: integer - round_tripper: type: object + round_tripper: properties: expect_continue_timeout: type: string @@ -1342,18 +1351,21 @@ spec: type: string write_buffer_size: type: integer - compress: + type: object + type: object type: object + compress: properties: compress_algorithm: - type: string enum: - gob - gzip - lz4 - zstd + type: string compression_level: type: integer + type: object filename: type: string filename_suffix: @@ -1361,7 +1373,6 @@ spec: post_stop_timeout: type: string restore_backoff: - type: object properties: backoff_factor: type: number @@ -1377,62 +1388,62 @@ spec: type: string retry_count: type: integer + type: object restore_backoff_enabled: type: boolean watch_enabled: type: boolean + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainerEnabled: type: boolean logging: - type: object properties: format: - type: string enum: - raw - json - level: type: 
string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -1443,9 +1454,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -1457,11 +1466,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -1471,6 +1481,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -1485,13 +1496,14 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - resources: + type: object type: object + resources: properties: limits: type: object @@ -1499,28 +1511,24 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true - server_config: type: object + server_config: properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1528,6 +1536,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1536,20 +1545,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1569,6 +1576,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1579,10 +1587,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1593,12 +1601,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1618,30 +1626,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1649,6 +1657,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1657,16 +1666,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1686,6 +1693,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1696,10 +1704,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string 
enum: - tcp - tcp4 @@ -1710,12 +1718,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1735,28 +1743,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1764,6 +1772,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1772,30 +1781,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1815,6 +1822,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1825,10 +1833,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1839,12 +1847,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1864,31 +1872,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -1896,6 +1904,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -1905,16 +1915,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -1930,20 +1939,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1954,12 +1972,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1979,33 +1997,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object 
socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2025,6 +2042,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2035,10 +2053,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2049,12 +2067,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2074,14 +2092,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -2093,8 +2114,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -2107,59 +2129,63 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string + type: object time_zone: type: string version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - defaults: + type: array type: object + defaults: properties: grpc: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -2175,11 +2201,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -2191,8 +2217,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -2204,9 +2230,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -2215,8 +2245,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -2224,13 +2258,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - 
TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -2238,15 +2272,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -2254,8 +2291,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -2263,8 +2300,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -2284,8 +2327,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -2297,12 +2340,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -2312,7 +2362,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -2324,59 +2373,60 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: + type: object type: object + image: properties: tag: type: string - logging: type: object + logging: properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap - networkPolicy: + type: string type: object + networkPolicy: properties: custom: - type: object properties: egress: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - ingress: type: array + ingress: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object enabled: type: boolean - observability: type: object + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -2387,9 +2437,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -2401,11 +2449,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -2415,6 +2464,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -2429,33 +2479,30 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - server_config: + type: object type: object + server_config: properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2463,6 +2510,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2471,20 +2519,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: 
integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2504,6 +2550,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2514,10 +2561,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2528,12 +2575,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2553,30 +2600,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2584,6 +2631,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2592,16 +2640,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2621,6 +2667,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2631,10 +2678,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2645,12 +2692,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2670,28 +2717,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2699,6 +2746,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2707,30 +2755,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2750,6 +2796,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2760,10 +2807,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2774,12 +2821,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: 
ip_recover_destination_addr: type: boolean @@ -2799,31 +2846,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -2831,6 +2878,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -2840,16 +2889,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -2865,20 +2913,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2889,12 +2946,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2914,33 +2971,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2960,6 +3016,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2970,10 +3027,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2984,12 +3041,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3009,14 +3066,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -3028,75 +3088,76 @@ spec: type: boolean key: type: string + type: object + type: object time_zone: type: string - discoverer: type: object + discoverer: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object 
x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string - discoverer: type: object + discoverer: properties: discovery_duration: type: string @@ -3105,10 +3166,8 @@ spec: namespace: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -3116,8 +3175,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -3125,8 +3184,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -3146,8 +3211,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -3159,11 +3224,11 @@ spec: type: boolean key: type: string - selectors: + type: object type: object + selectors: properties: node: - type: object properties: fields: type: object @@ -3171,8 +3236,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - node_metrics: type: object + node_metrics: properties: fields: type: object @@ -3180,8 +3245,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - pod: type: object + pod: properties: fields: type: object @@ -3189,8 +3254,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - pod_metrics: type: object + pod_metrics: properties: fields: type: object @@ -3198,8 +3263,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - service: type: object + service: properties: fields: type: object @@ -3207,76 +3272,79 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object + type: object + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string hpa: - type: object properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: 
string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -3285,12 +3353,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -3301,9 +3367,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -3315,11 +3379,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -3329,6 +3394,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -3343,28 +3409,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -3372,41 +3439,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3414,6 +3477,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3422,20 +3486,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3455,6 +3517,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3465,10 +3528,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3479,12 +3542,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3504,30 +3567,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer 
readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3535,6 +3598,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3543,16 +3607,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3572,6 +3634,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3582,10 +3645,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3596,12 +3659,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3621,28 +3684,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3650,6 +3713,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3658,30 +3722,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3701,6 +3763,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3711,10 +3774,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3725,12 +3788,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3750,31 +3813,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -3782,6 +3845,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -3791,16 +3856,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -3816,20 +3880,29 @@ 
spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3840,12 +3913,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3865,33 +3938,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3911,6 +3983,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3921,10 +3994,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3935,12 +4008,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3960,14 +4033,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -3979,8 +4055,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -3988,125 +4065,126 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - gateway: + type: array type: object + gateway: properties: filter: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object 
x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: egress_filter: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4122,11 +4200,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4138,8 +4216,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4151,9 +4229,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4162,8 +4244,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4171,13 +4257,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4185,15 +4271,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4201,8 +4290,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4210,8 +4299,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4231,8 +4326,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4244,12 +4339,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string 
max_recv_msg_size: @@ -4259,7 +4361,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4271,25 +4372,26 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object distance_filters: - type: array items: type: string - object_filters: type: array + object_filters: items: type: string - gateway_client: + type: array type: object + gateway_client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4305,11 +4407,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4321,8 +4423,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4334,9 +4436,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4345,8 +4451,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4354,13 +4464,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4368,15 +4478,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4384,8 +4497,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4393,8 +4506,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4414,8 +4533,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4427,12 +4546,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -4442,7 +4568,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4454,20 +4579,19 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - ingress_filter: type: object + ingress_filter: properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4483,11 +4607,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4499,8 +4623,8 @@ spec: type: integer open_timeout: type: string - 
connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4512,9 +4636,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4523,8 +4651,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4532,13 +4664,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4546,15 +4678,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4562,8 +4697,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4571,8 +4706,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4592,8 +4733,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4605,12 +4746,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -4620,7 +4768,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4632,57 +4779,60 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object insert_filters: - type: array items: type: string - search_filters: type: array + search_filters: items: type: string - update_filters: type: array + update_filters: items: type: string - upsert_filters: type: array + upsert_filters: items: type: string + type: array vectorizer: type: string - hpa: + type: object type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -4691,47 +4841,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 
+ type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -4740,12 +4891,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -4756,9 +4905,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -4770,11 +4917,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -4784,6 +4932,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -4798,28 +4947,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -4827,41 +4977,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -4869,6 +5015,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -4877,20 +5024,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -4910,6 +5055,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -4920,10 +5066,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -4934,12 +5080,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -4959,30 +5105,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -4990,6 +5136,7 @@ spec: type: string scheme: type: string 
+ type: object initialDelaySeconds: type: integer periodSeconds: @@ -4998,16 +5145,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5027,6 +5172,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5037,10 +5183,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5051,12 +5197,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5076,28 +5222,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -5105,6 +5251,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -5113,30 +5260,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5156,6 +5301,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5166,10 +5312,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5180,12 +5326,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5205,31 +5351,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -5237,6 +5383,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -5246,16 +5394,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -5271,20 +5418,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer 
max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5295,12 +5451,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5320,33 +5476,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5366,6 +5521,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5376,10 +5532,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5390,12 +5546,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5415,14 +5571,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -5434,8 +5593,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -5443,117 +5603,119 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - lb: + type: array type: object + lb: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + 
requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: agent_namespace: type: string discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5569,11 +5731,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5585,8 +5747,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5598,9 +5760,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5609,8 +5775,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -5618,13 +5788,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5632,15 +5802,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -5648,8 +5821,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -5657,8 +5830,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -5678,8 +5857,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -5691,12 +5870,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -5706,7 +5892,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -5718,17 +5903,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: 
- type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5744,11 +5929,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5760,8 +5945,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5773,9 +5958,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5784,8 +5973,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -5793,13 +5986,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5807,15 +6000,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -5823,8 +6019,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -5832,8 +6028,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -5853,8 +6055,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -5866,12 +6068,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -5881,7 +6090,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -5893,19 +6101,19 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string read_client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5921,11 +6129,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5937,8 +6145,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5950,9 +6158,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5961,8 +6173,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: 
+ type: string initial_connection_window_size: type: integer initial_window_size: @@ -5970,13 +6186,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5984,15 +6200,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -6000,8 +6219,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -6009,8 +6228,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -6030,8 +6255,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -6043,12 +6268,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -6058,7 +6290,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -6070,47 +6301,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object + type: object index_replica: - type: integer minimum: 1 - multi_operation_concurrency: type: integer + multi_operation_concurrency: minimum: 2 + type: integer node_name: type: string - hpa: type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -6119,47 +6353,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -6168,12 +6403,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -6184,9 +6417,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -6198,11 +6429,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: 
string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -6212,6 +6444,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -6226,28 +6459,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -6255,41 +6489,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6297,6 +6527,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6305,20 +6536,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6338,6 +6567,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6348,10 +6578,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6362,12 +6592,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6387,30 +6617,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6418,6 +6648,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6426,16 +6657,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6455,6 +6684,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6465,10 +6695,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp 
- tcp4 @@ -6479,12 +6709,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6504,28 +6734,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6533,6 +6763,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6541,30 +6772,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6584,6 +6813,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6594,10 +6824,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6608,12 +6838,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6633,31 +6863,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -6665,6 +6895,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -6674,16 +6906,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -6699,20 +6930,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6723,12 +6963,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6748,33 +6988,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: 
string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6794,6 +7033,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6804,10 +7044,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6818,12 +7058,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6843,14 +7083,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -6862,8 +7105,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -6871,126 +7115,129 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - mirror: + type: array type: object + mirror: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: 
object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -7006,11 +7253,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -7022,8 +7269,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -7035,9 +7282,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -7046,8 +7297,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -7055,13 +7310,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -7069,15 +7324,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -7085,8 +7343,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -7094,8 +7352,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -7115,8 +7379,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -7128,12 +7392,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -7143,7 +7414,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -7155,8 +7425,10 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object colocation: type: string discovery_duration: @@ -7168,10 +7440,8 @@ spec: namespace: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -7179,8 +7449,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -7188,8 +7458,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: 
ip_recover_destination_addr: type: boolean @@ -7209,8 +7485,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -7222,43 +7498,45 @@ spec: type: boolean key: type: string + type: object + type: object pod_name: type: string register_duration: type: string self_mirror_addr: type: string - hpa: type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -7267,47 +7545,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -7316,12 +7595,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -7332,9 +7609,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -7346,11 +7621,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -7360,6 +7636,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -7374,28 +7651,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -7403,41 +7681,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7445,6 +7719,7 @@ spec: type: string scheme: type: string + type: object 
initialDelaySeconds: type: integer periodSeconds: @@ -7453,20 +7728,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7486,6 +7759,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7496,10 +7770,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7510,12 +7784,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7535,30 +7809,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7566,6 +7840,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -7574,16 +7849,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7603,6 +7876,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7613,10 +7887,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7627,12 +7901,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7652,28 +7926,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7681,6 +7955,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -7689,30 +7964,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7732,6 +8005,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7742,10 +8016,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string 
enum: - tcp - tcp4 @@ -7756,12 +8030,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7781,31 +8055,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -7813,6 +8087,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -7822,16 +8098,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -7847,20 +8122,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7871,12 +8155,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7896,33 +8180,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7942,6 +8225,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7952,10 +8236,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7966,12 +8250,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7991,14 +8275,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -8010,8 +8297,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -8019,118 +8307,165 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: 
object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - manager: + type: array + type: object type: object + manager: properties: index: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true corrector: - type: object properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8146,11 +8481,11 @@ spec: type: string retry_count: type: integer + type: object call_option: 
type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8162,8 +8497,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8175,9 +8510,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8186,8 +8525,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8195,13 +8538,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8209,15 +8552,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8225,8 +8571,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8234,8 +8580,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -8255,8 +8607,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8268,12 +8620,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8283,7 +8642,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8295,17 +8653,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8321,11 +8679,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8337,8 +8695,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8350,9 +8708,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8361,8 +8723,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8370,13 +8736,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: 
string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8384,15 +8750,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8400,8 +8769,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8409,8 +8778,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -8430,8 +8805,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8443,12 +8818,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8458,7 +8840,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8470,26 +8851,27 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array gateway: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8505,11 +8887,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8521,8 +8903,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8534,9 +8916,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8545,8 +8931,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8554,13 +8944,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8568,15 +8958,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8584,8 +8977,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8593,8 +8986,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: 
ip_recover_destination_addr: type: boolean @@ -8614,8 +9013,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8627,12 +9026,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8642,7 +9048,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8654,26 +9059,28 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kvs_background_compaction_interval: type: string kvs_background_sync_interval: @@ -8682,13 +9089,14 @@ spec: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -8699,9 +9107,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -8713,11 +9119,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -8727,6 +9134,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -8741,35 +9149,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -8777,6 +9182,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -8785,20 +9191,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -8818,6 +9222,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -8828,10 +9233,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -8842,12 +9247,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -8867,30 +9272,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + 
readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -8898,6 +9303,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -8906,16 +9312,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -8935,6 +9339,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -8945,10 +9350,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -8959,12 +9364,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -8984,28 +9389,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9013,6 +9418,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9021,30 +9427,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9064,6 +9468,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9074,10 +9479,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9088,12 +9493,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9113,31 +9518,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -9145,6 +9550,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -9154,16 +9561,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - 
MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -9179,20 +9585,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9203,12 +9618,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9228,33 +9643,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9274,6 +9688,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9284,10 +9699,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9298,12 +9713,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9323,14 +9738,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -9342,40 +9760,89 @@ spec: type: boolean key: type: string + type: object + type: object startingDeadlineSeconds: type: integer stream_list_concurrency: - type: integer minimum: 1 + type: integer suspend: type: boolean + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ - creator: + type: string type: object + creator: properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + 
x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string concurrency: - type: integer minimum: 1 + type: integer creation_pool_size: type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -9391,11 +9858,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -9407,8 +9874,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -9420,9 +9887,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -9431,8 +9902,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -9440,13 +9915,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -9454,15 +9929,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -9470,8 +9948,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -9479,8 +9957,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -9500,8 +9984,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -9513,12 +9997,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -9528,7 +10019,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -9540,17 +10030,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -9566,11 +10056,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -9582,8 +10072,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -9595,9 +10085,13 @@ spec: type: string size: type: integer - dial_option: type: object + 
content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -9606,8 +10100,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -9615,13 +10113,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -9629,15 +10127,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -9645,8 +10146,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -9654,8 +10155,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -9675,8 +10182,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -9688,12 +10195,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -9703,7 +10217,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -9715,46 +10228,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -9765,9 +10282,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -9779,11 +10294,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -9793,6 +10309,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -9807,35 +10324,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: 
enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9843,6 +10357,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9851,20 +10366,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9884,6 +10397,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9894,10 +10408,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9908,12 +10422,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9933,30 +10447,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9964,6 +10478,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9972,16 +10487,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10001,6 +10514,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10011,10 +10525,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10025,12 +10539,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10050,28 +10564,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -10079,6 +10593,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -10087,30 +10602,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10130,6 
+10643,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10140,10 +10654,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10154,12 +10668,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10179,31 +10693,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -10211,6 +10725,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -10220,16 +10736,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -10245,20 +10760,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10269,12 +10793,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10294,33 +10818,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10340,6 +10863,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10350,10 +10874,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10364,12 +10888,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10389,14 +10913,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -10408,43 +10935,50 @@ spec: type: boolean key: type: string 
+ type: object + type: object startingDeadlineSeconds: type: integer suspend: type: boolean target_addrs: - type: array items: type: string + type: array + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - indexer: type: object + indexer: properties: agent_namespace: type: string @@ -10459,22 +10993,19 @@ spec: auto_save_index_wait_duration: type: string concurrency: - type: integer minimum: 1 + type: integer creation_pool_size: type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -10490,11 +11021,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -10506,8 +11037,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -10519,9 +11050,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -10530,8 +11065,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -10539,13 +11078,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -10553,15 +11092,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -10569,8 +11111,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -10578,8 +11120,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -10599,8 +11147,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -10612,12 +11160,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -10627,7 +11182,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -10639,17 +11193,17 @@ spec: 
type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -10665,11 +11219,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -10681,8 +11235,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -10694,9 +11248,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -10705,8 +11263,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -10714,13 +11276,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -10728,15 +11290,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -10744,8 +11309,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -10753,8 +11318,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -10774,8 +11345,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -10787,12 +11358,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -10802,7 +11380,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -10814,43 +11391,47 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object node_name: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxUnavailable: type: string name: @@ -10861,12 +11442,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -10877,9 +11456,7 @@ spec: enable_version_info: type: boolean 
version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -10891,11 +11468,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -10905,6 +11483,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -10919,113 +11498,114 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - operator: + type: object type: object + operator: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string namespace: @@ -11036,12 +11616,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -11052,9 +11630,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -11066,11 +11642,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -11080,6 +11657,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -11094,31 +11672,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: 
object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer replicas: - type: integer minimum: 0 + type: integer resources: - type: object properties: limits: type: object @@ -11126,44 +11705,40 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object rotation_job_concurrency: - type: integer minimum: 1 + type: integer securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11171,6 +11746,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11179,20 +11755,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11212,6 +11786,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11222,10 +11797,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11236,12 +11811,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11261,30 +11836,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11292,6 +11867,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11300,16 +11876,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11329,6 +11903,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11339,10 +11914,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11353,12 +11928,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11378,28 +11953,28 @@ spec: type: 
boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11407,6 +11982,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11415,30 +11991,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11458,6 +12032,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11468,10 +12043,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11482,12 +12057,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11507,31 +12082,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -11539,6 +12114,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -11548,16 +12125,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -11573,20 +12149,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11597,12 +12182,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11622,33 +12207,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: 
integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11668,6 +12252,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11678,10 +12263,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11692,12 +12277,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11717,14 +12302,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -11736,103 +12324,102 @@ spec: type: boolean key: type: string + type: object + type: object terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer readreplica: - type: object properties: rotator: - type: object properties: agent_namespace: type: string clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -11843,9 +12430,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -11857,11 +12442,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -11871,6 +12457,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -11885,11 +12472,13 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podSecurityContext: type: object 
x-kubernetes-preserve-unknown-fields: true @@ -11897,27 +12486,22 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11925,6 +12509,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11933,20 +12518,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11966,6 +12549,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11976,10 +12560,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11990,12 +12574,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12015,30 +12599,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -12046,6 +12630,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -12054,16 +12639,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12083,6 +12666,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12093,10 +12677,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12107,12 +12691,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12132,28 +12716,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -12161,6 +12745,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -12169,30 +12754,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: 
object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12212,6 +12795,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12222,10 +12806,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12236,12 +12820,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12261,31 +12845,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -12293,6 +12877,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -12302,16 +12888,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -12327,20 +12912,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12351,12 +12945,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12376,33 +12970,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12422,6 +13015,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12432,10 +13026,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12446,12 +13040,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12471,14 +13065,17 @@ spec: type: 
boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -12490,25 +13087,28 @@ spec: type: boolean key: type: string - serviceAccount: + type: object type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object target_read_replica_id_annotations_key: type: string ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object + type: object replicas: - type: integer minimum: 0 + type: integer resources: - type: object properties: limits: type: object @@ -12516,36 +13116,78 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string - saver: type: object + saver: properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string concurrency: - type: integer minimum: 1 + type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -12561,11 +13203,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -12577,8 +13219,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -12590,9 +13232,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -12601,8 +13247,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -12610,13 +13260,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -12624,15 +13274,18 @@ spec: type: string 
timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -12640,8 +13293,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -12649,8 +13302,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -12670,8 +13329,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -12683,12 +13342,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -12698,7 +13364,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -12710,17 +13375,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -12736,11 +13401,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -12752,8 +13417,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -12765,9 +13430,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -12776,8 +13445,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -12785,13 +13458,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -12799,15 +13472,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -12815,8 +13491,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -12824,8 +13500,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -12845,8 +13527,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -12858,12 +13540,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: 
integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -12873,7 +13562,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -12885,46 +13573,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -12935,9 +13627,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -12949,11 +13639,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -12963,6 +13654,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -12977,35 +13669,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13013,6 +13702,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13021,20 +13711,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13054,6 +13742,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13064,10 +13753,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13078,12 +13767,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13103,30 +13792,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: 
integer httpGet: - type: object properties: path: type: string @@ -13134,6 +13823,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13142,16 +13832,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13171,6 +13859,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13181,10 +13870,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13195,12 +13884,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13220,28 +13909,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13249,6 +13938,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13257,30 +13947,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13300,6 +13988,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13310,10 +13999,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13324,12 +14013,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13349,31 +14038,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -13381,6 +14070,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -13390,16 +14081,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -13415,20 +14105,29 @@ spec: type: string timeout: type: 
string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13439,12 +14138,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13464,33 +14163,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13510,6 +14208,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13520,10 +14219,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13534,12 +14233,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13559,14 +14258,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -13578,44 +14280,47 @@ spec: type: boolean key: type: string + type: object + type: object startingDeadlineSeconds: type: integer suspend: type: boolean target_addrs: - type: array items: type: string + type: array + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13623,6 +14328,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13631,20 +14337,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13664,6 +14368,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13674,10 +14379,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13688,12 +14393,12 @@ 
spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13713,30 +14418,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13744,6 +14449,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13752,16 +14458,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13781,6 +14485,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13791,10 +14496,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13805,12 +14510,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13830,28 +14535,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13859,6 +14564,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13867,30 +14573,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13910,6 +14614,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13920,10 +14625,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13934,12 +14639,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13959,31 +14664,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: 
object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -13991,6 +14696,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -14000,16 +14707,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -14025,20 +14731,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -14049,12 +14764,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -14074,33 +14789,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -14120,6 +14834,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -14130,10 +14845,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -14144,12 +14859,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -14169,14 +14884,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -14188,8 +14906,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -14197,37 +14916,46 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object 
x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + type: object diff --git a/charts/vald-helm-operator/values.yaml b/charts/vald-helm-operator/values.yaml index f9a0cbc541..bf49c9132a 100644 --- a/charts/vald-helm-operator/values.yaml +++ b/charts/vald-helm-operator/values.yaml @@ -27,7 +27,7 @@ image: repository: vdaas/vald-helm-operator # @schema {"name": "image.tag", "type": "string"} # image.tag -- image tag - tag: v1.7.13 + tag: v1.7.16 # @schema {"name": "image.pullPolicy", "type": "string", "enum": ["Always", "Never", "IfNotPresent"]} # image.pullPolicy -- image pull policy pullPolicy: Always diff --git a/charts/vald-readreplica/Chart.yaml b/charts/vald-readreplica/Chart.yaml index 4e1f40c33a..e4fd8c24cc 100644 --- a/charts/vald-readreplica/Chart.yaml +++ b/charts/vald-readreplica/Chart.yaml @@ -16,7 +16,7 @@ apiVersion: v2 name: vald-readreplica -version: v1.7.13 +version: v1.7.16 description: A helm chart for readreplica add-on type: application keywords: diff --git a/charts/vald-readreplica/README.md b/charts/vald-readreplica/README.md index 2f5fd70c3e..a04fde6e8e 100644 --- a/charts/vald-readreplica/README.md +++ b/charts/vald-readreplica/README.md @@ -4,7 +4,7 @@ This is a Helm chart to install Vald readreplica components. -Current chart version is `v1.7.13` +Current chart version is `v1.7.16` ## Install diff --git a/charts/vald/Chart.yaml b/charts/vald/Chart.yaml index d44ba3ce6a..56aa50b2a8 100644 --- a/charts/vald/Chart.yaml +++ b/charts/vald/Chart.yaml @@ -16,7 +16,7 @@ apiVersion: v2 name: vald -version: v1.7.13 +version: v1.7.16 description: A distributed high scalable & high-speed approximate nearest neighbor search engine type: application keywords: diff --git a/charts/vald/README.md b/charts/vald/README.md index c2c9e211d4..c89b636251 100644 --- a/charts/vald/README.md +++ b/charts/vald/README.md @@ -2,7 +2,7 @@ This is a Helm chart to install Vald components. 
-Current chart version is `v1.7.13` +Current chart version is `v1.7.16` ## Table of Contents @@ -44,1019 +44,1067 @@ Run the following command to install the chart, ### Parameters -| Key | Type | Default | Description | -| ------------------------------------------------------------------------------------------------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -| agent.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| agent.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| agent.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| agent.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| agent.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | 
`[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-agent"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | -| agent.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| agent.algorithm | string | `"ngt"` | agent algorithm type. it should be `ngt` or `faiss`. | -| agent.annotations | object | `{}` | deployment annotations | -| agent.clusterRole.enabled | bool | `true` | creates clusterRole resource | -| agent.clusterRole.name | string | `"agent"` | name of clusterRole | -| agent.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | -| agent.clusterRoleBinding.name | string | `"agent"` | name of clusterRoleBinding | -| agent.enabled | bool | `true` | agent enabled | -| agent.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| agent.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| agent.faiss.auto_index_check_duration | string | `"30m"` | check duration of automatic indexing | -| agent.faiss.auto_index_duration_limit | string | `"24h"` | limit duration of automatic indexing | -| agent.faiss.auto_index_length | int | `100` | number of cache to trigger automatic indexing | -| agent.faiss.auto_save_index_duration | string | `"35m"` | duration of automatic save index | -| agent.faiss.dimension | int | `4096` | vector dimension | -| agent.faiss.enable_copy_on_write | bool | `false` | enable copy on write saving for more stable backup | -| agent.faiss.enable_in_memory_mode | bool | `true` | in-memory mode enabled | -| agent.faiss.enable_proactive_gc | bool | `false` | enable proactive GC call for reducing heap memory allocation | -| agent.faiss.index_path | string | `""` | path to index data | -| agent.faiss.initial_delay_max_duration | string | `"3m"` | maximum duration for initial delay | -| agent.faiss.kvsdb.concurrency | int | `6` | kvsdb processing concurrency | -| agent.faiss.load_index_timeout_factor | string | `"1ms"` | a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) \* (factor). 
| -| agent.faiss.m | int | `8` | | -| agent.faiss.max_load_index_timeout | string | `"10m"` | maximum duration of load index timeout | -| agent.faiss.metric_type | string | `"l2"` | | -| agent.faiss.min_load_index_timeout | string | `"3m"` | minimum duration of load index timeout | -| agent.faiss.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of myself | -| agent.faiss.nbits_per_idx | int | `8` | | -| agent.faiss.nlist | int | `100` | | -| agent.faiss.pod_name | string | `"_MY_POD_NAME_"` | pod name of myself | -| agent.faiss.vqueue.delete_buffer_pool_size | int | `5000` | delete slice pool buffer size | -| agent.faiss.vqueue.insert_buffer_pool_size | int | `10000` | insert slice pool buffer size | -| agent.hpa.enabled | bool | `false` | HPA enabled | -| agent.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| agent.image.pullPolicy | string | `"Always"` | image pull policy | -| agent.image.repository | string | `"vdaas/vald-agent-ngt"` | image repository | -| agent.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| agent.initContainers | list | `[]` | init containers | -| agent.kind | string | `"StatefulSet"` | deployment kind: Deployment, DaemonSet or StatefulSet | -| agent.logging | object | `{}` | logging config (overrides defaults.logging) | -| agent.maxReplicas | int | `300` | maximum number of replicas. if HPA is disabled, this value will be ignored. | -| agent.maxUnavailable | string | `"1"` | maximum number of unavailable replicas | -| agent.minReplicas | int | `20` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | -| agent.name | string | `"vald-agent"` | name of agent deployment | -| agent.ngt.auto_create_index_pool_size | int | `16` | batch process pool size of automatic create index operation | -| agent.ngt.auto_index_check_duration | string | `"30m"` | check duration of automatic indexing | -| agent.ngt.auto_index_duration_limit | string | `"24h"` | limit duration of automatic indexing | -| agent.ngt.auto_index_length | int | `100` | number of cache to trigger automatic indexing | -| agent.ngt.auto_save_index_duration | string | `"35m"` | duration of automatic save index | -| agent.ngt.broken_index_history_limit | int | `0` | maximum number of broken index generations to backup | -| agent.ngt.bulk_insert_chunk_size | int | `10` | bulk insert chunk size | -| agent.ngt.creation_edge_size | int | `50` | creation edge size | -| agent.ngt.default_epsilon | float | `0.05` | default epsilon used for search | -| agent.ngt.default_pool_size | int | `16` | default create index batch pool size | -| agent.ngt.default_radius | float | `-1` | default radius used for search | -| agent.ngt.dimension | int | `4096` | vector dimension | -| agent.ngt.distance_type | string | `"l2"` | distance type. it should be `l1`, `l2`, `angle`, `hamming`, `cosine`,`poincare`, `lorentz`, `jaccard`, `sparsejaccard`, `normalizedangle` or `normalizedcosine` or `innerproduct`. 
for further details about NGT libraries supported distance is https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/algorithm/ngt#pkg-constants | -| agent.ngt.enable_copy_on_write | bool | `false` | enable copy on write saving for more stable backup | -| agent.ngt.enable_export_index_info_to_k8s | bool | `false` | enable export index info to k8s | -| agent.ngt.enable_in_memory_mode | bool | `true` | in-memory mode enabled | -| agent.ngt.enable_proactive_gc | bool | `false` | enable proactive GC call for reducing heap memory allocation | -| agent.ngt.enable_statistics | bool | `false` | enable index statistics loading | -| agent.ngt.error_buffer_limit | int | `10` | maximum number of core ngt error buffer pool size limit | -| agent.ngt.export_index_info_duration | string | `"1m"` | duration of exporting index info | -| agent.ngt.index_path | string | `""` | path to index data | -| agent.ngt.initial_delay_max_duration | string | `"3m"` | maximum duration for initial delay | -| agent.ngt.kvsdb.concurrency | int | `6` | kvsdb processing concurrency | -| agent.ngt.load_index_timeout_factor | string | `"1ms"` | a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) \* (factor). | -| agent.ngt.max_load_index_timeout | string | `"10m"` | maximum duration of load index timeout | -| agent.ngt.min_load_index_timeout | string | `"3m"` | minimum duration of load index timeout | -| agent.ngt.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of myself | -| agent.ngt.object_type | string | `"float"` | object type. it should be `float` or `uint8` or `float16`. for further details: https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference | -| agent.ngt.pod_name | string | `"_MY_POD_NAME_"` | pod name of myself | -| agent.ngt.search_edge_size | int | `50` | search edge size | -| agent.ngt.vqueue.delete_buffer_pool_size | int | `5000` | delete slice pool buffer size | -| agent.ngt.vqueue.insert_buffer_pool_size | int | `10000` | insert slice pool buffer size | -| agent.nodeName | string | `""` | node name | -| agent.nodeSelector | object | `{}` | node selector | -| agent.observability | object | `{"otlp":{"attribute":{"service_name":"vald-agent"}}}` | observability config (overrides defaults.observability) | -| agent.persistentVolume.accessMode | string | `"ReadWriteOncePod"` | agent pod storage accessMode | -| agent.persistentVolume.enabled | bool | `false` | enables PVC. 
It is required to enable if agent pod's file store functionality is enabled with non in-memory mode | -| agent.persistentVolume.mountPropagation | string | `"None"` | agent pod storage mountPropagation | -| agent.persistentVolume.size | string | `"100Gi"` | size of agent pod volume | -| agent.persistentVolume.storageClass | string | `"vald-sc"` | storageClass name for agent pod volume | -| agent.podAnnotations | object | `{}` | pod annotations | -| agent.podManagementPolicy | string | `"OrderedReady"` | pod management policy: OrderedReady or Parallel | -| agent.podPriority.enabled | bool | `true` | agent pod PriorityClass enabled | -| agent.podPriority.value | int | `1000000000` | agent pod PriorityClass value | -| agent.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| agent.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| agent.readreplica | object | `{"component_name":"agent-readreplica","enabled":false,"hpa":{"enabled":false,"targetCPUUtilizationPercentage":80},"label_key":"vald-readreplica-id","maxReplicas":3,"minReplicas":1,"name":"vald-agent-ngt-readreplica","service":{"annotations":{}},"snapshot_classname":"","volume_name":"vald-agent-ngt-readreplica-pvc"}` | readreplica deployment annotations | -| agent.readreplica.component_name | string | `"agent-readreplica"` | app.kubernetes.io/component name of agent readreplica | -| agent.readreplica.enabled | bool | `false` | [This feature is WORK IN PROGRESS]enable agent readreplica | -| agent.readreplica.hpa.enabled | bool | `false` | HPA enabled | -| agent.readreplica.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| agent.readreplica.label_key | string | `"vald-readreplica-id"` | label key to identify read replica resources | -| agent.readreplica.maxReplicas | int | `3` | maximum number of replicas. if HPA is disabled, this value will be ignored. | -| agent.readreplica.minReplicas | int | `1` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | -| agent.readreplica.name | string | `"vald-agent-ngt-readreplica"` | name of agent readreplica | -| agent.readreplica.service | object | `{"annotations":{}}` | service settings for read replica service resources | -| agent.readreplica.service.annotations | object | `{}` | readreplica deployment annotations | -| agent.readreplica.snapshot_classname | string | `""` | snapshot class name for snapshotter used for read replica | -| agent.readreplica.volume_name | string | `"vald-agent-ngt-readreplica-pvc"` | name of clone volume of agent pvc for read replica | -| agent.resources | object | `{"requests":{"cpu":"300m","memory":"4Gi"}}` | compute resources. 
recommended setting of memory requests = cluster memory \* 0.4 / number of agent pods | -| agent.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| agent.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| agent.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| agent.rollingUpdate.partition | int | `0` | StatefulSet partition | -| agent.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":false,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| agent.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{"startupProbe":{"failureThreshold":200,"periodSeconds":5}}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| agent.service.annotations | object | `{}` | service annotations | -| agent.service.labels | object | `{}` | service labels | -| agent.serviceAccount.enabled | bool | `true` | creates service account | -| agent.serviceAccount.name | string | `"agent-ngt"` | name of service account | -| agent.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| agent.sidecar.config.auto_backup_duration | string | `"24h"` | auto backup duration | -| agent.sidecar.config.auto_backup_enabled | bool | `true` | auto backup triggered by timer is enabled | -| agent.sidecar.config.blob_storage.bucket | string | `""` | bucket name | -| agent.sidecar.config.blob_storage.cloud_storage.client.credentials_file_path | string | `""` | credentials file path | -| agent.sidecar.config.blob_storage.cloud_storage.client.credentials_json | string | `""` | credentials json | -| agent.sidecar.config.blob_storage.cloud_storage.url | string | `""` | cloud storage url | -| agent.sidecar.config.blob_storage.cloud_storage.write_buffer_size | int | `0` | bytes of the chunks for upload | -| agent.sidecar.config.blob_storage.cloud_storage.write_cache_control | string | `""` | Cache-Control of HTTP Header | -| agent.sidecar.config.blob_storage.cloud_storage.write_content_disposition | string | `""` | Content-Disposition of HTTP Header | -| agent.sidecar.config.blob_storage.cloud_storage.write_content_encoding | string | `""` | the encoding of the blob's content | -| agent.sidecar.config.blob_storage.cloud_storage.write_content_language | string | `""` | the language of blob's content | -| agent.sidecar.config.blob_storage.cloud_storage.write_content_type | string | `""` | MIME type of the blob | -| agent.sidecar.config.blob_storage.s3.access_key | string | `"_AWS_ACCESS_KEY_"` | s3 access key | -| agent.sidecar.config.blob_storage.s3.enable_100_continue | bool | `true` | enable AWS SDK adding the 'Expect: 100-Continue' header to PUT requests over 2MB of content. | -| agent.sidecar.config.blob_storage.s3.enable_content_md5_validation | bool | `true` | enable the S3 client to add MD5 checksum to upload API calls. 
| -| agent.sidecar.config.blob_storage.s3.enable_endpoint_discovery | bool | `false` | enable endpoint discovery | -| agent.sidecar.config.blob_storage.s3.enable_endpoint_host_prefix | bool | `true` | enable prefixing request endpoint hosts with modeled information | -| agent.sidecar.config.blob_storage.s3.enable_param_validation | bool | `true` | enables semantic parameter validation | -| agent.sidecar.config.blob_storage.s3.enable_ssl | bool | `true` | enable ssl for s3 session | -| agent.sidecar.config.blob_storage.s3.endpoint | string | `""` | s3 endpoint | -| agent.sidecar.config.blob_storage.s3.force_path_style | bool | `false` | use path-style addressing | -| agent.sidecar.config.blob_storage.s3.max_chunk_size | string | `"64mb"` | s3 download max chunk size | -| agent.sidecar.config.blob_storage.s3.max_part_size | string | `"64mb"` | s3 multipart upload max part size | -| agent.sidecar.config.blob_storage.s3.max_retries | int | `3` | maximum number of retries of s3 client | -| agent.sidecar.config.blob_storage.s3.region | string | `""` | s3 region | -| agent.sidecar.config.blob_storage.s3.secret_access_key | string | `"_AWS_SECRET_ACCESS_KEY_"` | s3 secret access key | -| agent.sidecar.config.blob_storage.s3.token | string | `""` | s3 token | -| agent.sidecar.config.blob_storage.s3.use_accelerate | bool | `false` | enable s3 accelerate feature | -| agent.sidecar.config.blob_storage.s3.use_arn_region | bool | `false` | s3 service client to use the region specified in the ARN | -| agent.sidecar.config.blob_storage.s3.use_dual_stack | bool | `false` | use dual stack | -| agent.sidecar.config.blob_storage.storage_type | string | `"s3"` | storage type | -| agent.sidecar.config.client.net.dialer.dual_stack_enabled | bool | `false` | HTTP client TCP dialer dual stack enabled | -| agent.sidecar.config.client.net.dialer.keepalive | string | `"5m"` | HTTP client TCP dialer keep alive | -| agent.sidecar.config.client.net.dialer.timeout | string | `"5s"` | HTTP client TCP dialer connect timeout | -| agent.sidecar.config.client.net.dns.cache_enabled | bool | `true` | HTTP client TCP DNS cache enabled | -| agent.sidecar.config.client.net.dns.cache_expiration | string | `"24h"` | | -| agent.sidecar.config.client.net.dns.refresh_duration | string | `"1h"` | HTTP client TCP DNS cache expiration | -| agent.sidecar.config.client.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| agent.sidecar.config.client.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| agent.sidecar.config.client.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| agent.sidecar.config.client.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| agent.sidecar.config.client.net.socket_option.tcp_cork | bool | `true` | server listen socket option for tcp_cork functionality | -| agent.sidecar.config.client.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| agent.sidecar.config.client.net.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | -| agent.sidecar.config.client.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | -| agent.sidecar.config.client.net.socket_option.tcp_quick_ack | 
bool | `false` | server listen socket option for tcp_quick_ack functionality | -| agent.sidecar.config.client.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| agent.sidecar.config.client.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| agent.sidecar.config.client.net.tls.enabled | bool | `false` | TLS enabled | -| agent.sidecar.config.client.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| agent.sidecar.config.client.net.tls.key | string | `"/path/to/key"` | TLS key path | -| agent.sidecar.config.client.transport.backoff.backoff_factor | float | `1.1` | backoff backoff factor | -| agent.sidecar.config.client.transport.backoff.backoff_time_limit | string | `"5s"` | backoff time limit | -| agent.sidecar.config.client.transport.backoff.enable_error_log | bool | `true` | backoff error log enabled | -| agent.sidecar.config.client.transport.backoff.initial_duration | string | `"5ms"` | backoff initial duration | -| agent.sidecar.config.client.transport.backoff.jitter_limit | string | `"100ms"` | backoff jitter limit | -| agent.sidecar.config.client.transport.backoff.maximum_duration | string | `"5s"` | backoff maximum duration | -| agent.sidecar.config.client.transport.backoff.retry_count | int | `100` | backoff retry count | -| agent.sidecar.config.client.transport.round_tripper.expect_continue_timeout | string | `"5s"` | expect continue timeout | -| agent.sidecar.config.client.transport.round_tripper.force_attempt_http_2 | bool | `true` | force attempt HTTP2 | -| agent.sidecar.config.client.transport.round_tripper.idle_conn_timeout | string | `"90s"` | timeout for idle connections | -| agent.sidecar.config.client.transport.round_tripper.max_conns_per_host | int | `10` | maximum count of connections per host | -| agent.sidecar.config.client.transport.round_tripper.max_idle_conns | int | `100` | maximum count of idle connections | -| agent.sidecar.config.client.transport.round_tripper.max_idle_conns_per_host | int | `10` | maximum count of idle connections per host | -| agent.sidecar.config.client.transport.round_tripper.max_response_header_size | int | `0` | maximum response header size | -| agent.sidecar.config.client.transport.round_tripper.read_buffer_size | int | `0` | read buffer size | -| agent.sidecar.config.client.transport.round_tripper.response_header_timeout | string | `"5s"` | timeout for response header | -| agent.sidecar.config.client.transport.round_tripper.tls_handshake_timeout | string | `"5s"` | TLS handshake timeout | -| agent.sidecar.config.client.transport.round_tripper.write_buffer_size | int | `0` | write buffer size | -| agent.sidecar.config.compress.compress_algorithm | string | `"gzip"` | compression algorithm. must be `gob`, `gzip`, `lz4` or `zstd` | -| agent.sidecar.config.compress.compression_level | int | `-1` | compression level. value range relies on which algorithm is used. `gob`: level will be ignored. `gzip`: -1 (default compression), 0 (no compression), or 1 (best speed) to 9 (best compression). `lz4`: >= 0, higher is better compression. `zstd`: 1 (fastest) to 22 (best), however implementation relies on klauspost/compress. 
| -| agent.sidecar.config.filename | string | `"_MY_POD_NAME_"` | backup filename | -| agent.sidecar.config.filename_suffix | string | `".tar.gz"` | suffix for backup filename | -| agent.sidecar.config.post_stop_timeout | string | `"2m"` | timeout for observing file changes during post stop | -| agent.sidecar.config.restore_backoff.backoff_factor | float | `1.2` | restore backoff factor | -| agent.sidecar.config.restore_backoff.backoff_time_limit | string | `"30m"` | restore backoff time limit | -| agent.sidecar.config.restore_backoff.enable_error_log | bool | `true` | restore backoff log enabled | -| agent.sidecar.config.restore_backoff.initial_duration | string | `"1s"` | restore backoff initial duration | -| agent.sidecar.config.restore_backoff.jitter_limit | string | `"10s"` | restore backoff jitter limit | -| agent.sidecar.config.restore_backoff.maximum_duration | string | `"1m"` | restore backoff maximum duration | -| agent.sidecar.config.restore_backoff.retry_count | int | `100` | restore backoff retry count | -| agent.sidecar.config.restore_backoff_enabled | bool | `false` | restore backoff enabled | -| agent.sidecar.config.watch_enabled | bool | `true` | auto backup triggered by file changes is enabled | -| agent.sidecar.enabled | bool | `false` | sidecar enabled | -| agent.sidecar.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"AWS_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"access-key","name":"aws-secret"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"secret-access-key","name":"aws-secret"}}}]` | environment variables | -| agent.sidecar.image.pullPolicy | string | `"Always"` | image pull policy | -| agent.sidecar.image.repository | string | `"vdaas/vald-agent-sidecar"` | image repository | -| agent.sidecar.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| agent.sidecar.initContainerEnabled | bool | `false` | sidecar on initContainer mode enabled. | -| agent.sidecar.logging | object | `{}` | logging config (overrides defaults.logging) | -| agent.sidecar.name | string | `"vald-agent-sidecar"` | name of agent sidecar | -| agent.sidecar.observability | object | `{"otlp":{"attribute":{"service_name":"vald-agent-sidecar"}}}` | observability config (overrides defaults.observability) | -| agent.sidecar.resources | object | `{"requests":{"cpu":"100m","memory":"100Mi"}}` | compute resources. 
| -| agent.sidecar.server_config | object | `{"healths":{"liveness":{"enabled":false,"port":13000,"servicePort":13000},"readiness":{"enabled":false,"port":13001,"servicePort":13001},"startup":{"enabled":false,"port":13001}},"metrics":{"pprof":{"port":16060,"servicePort":16060}},"servers":{"grpc":{"enabled":false,"port":18081,"servicePort":18081},"rest":{"enabled":false,"port":18080,"servicePort":18080}}}` | server config (overrides defaults.server_config) | -| agent.sidecar.service.annotations | object | `{}` | agent sidecar service annotations | -| agent.sidecar.service.enabled | bool | `false` | agent sidecar service enabled | -| agent.sidecar.service.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| agent.sidecar.service.labels | object | `{}` | agent sidecar service labels | -| agent.sidecar.service.type | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| agent.sidecar.time_zone | string | `""` | Time zone | -| agent.sidecar.version | string | `"v0.0.0"` | version of agent sidecar config | -| agent.terminationGracePeriodSeconds | int | `120` | duration in seconds pod needs to terminate gracefully | -| agent.time_zone | string | `""` | Time zone | -| agent.tolerations | list | `[]` | tolerations | -| agent.topologySpreadConstraints | list | `[]` | topology spread constraints for agent pods | -| agent.version | string | `"v0.0.0"` | version of agent config | -| agent.volumeMounts | list | `[]` | volume mounts | -| agent.volumes | list | `[]` | volumes | -| defaults.grpc.client.addrs | list | `[]` | gRPC client addresses | -| defaults.grpc.client.backoff.backoff_factor | float | `1.1` | gRPC client backoff factor | -| defaults.grpc.client.backoff.backoff_time_limit | string | `"5s"` | gRPC client backoff time limit | -| defaults.grpc.client.backoff.enable_error_log | bool | `true` | gRPC client backoff log enabled | -| defaults.grpc.client.backoff.initial_duration | string | `"5ms"` | gRPC client backoff initial duration | -| defaults.grpc.client.backoff.jitter_limit | string | `"100ms"` | gRPC client backoff jitter limit | -| defaults.grpc.client.backoff.maximum_duration | string | `"5s"` | gRPC client backoff maximum duration | -| defaults.grpc.client.backoff.retry_count | int | `100` | gRPC client backoff retry count | -| defaults.grpc.client.call_option.max_recv_msg_size | int | `0` | gRPC client call option max receive message size | -| defaults.grpc.client.call_option.max_retry_rpc_buffer_size | int | `0` | gRPC client call option max retry rpc buffer size | -| defaults.grpc.client.call_option.max_send_msg_size | int | `0` | gRPC client call option max send message size | -| defaults.grpc.client.call_option.wait_for_ready | bool | `true` | gRPC client call option wait for ready | -| defaults.grpc.client.circuit_breaker.closed_error_rate | float | `0.7` | gRPC client circuitbreaker closed error rate | -| defaults.grpc.client.circuit_breaker.closed_refresh_timeout | string | `"10s"` | gRPC client circuitbreaker closed refresh timeout | -| defaults.grpc.client.circuit_breaker.half_open_error_rate | float | `0.5` | gRPC client circuitbreaker half-open error rate | -| defaults.grpc.client.circuit_breaker.min_samples | int | `1000` | gRPC client circuitbreaker minimum sampling count | -| defaults.grpc.client.circuit_breaker.open_timeout | string | `"1s"` | gRPC client circuitbreaker open timeout | -| defaults.grpc.client.connection_pool.enable_dns_resolver | 
bool | `true` | enables gRPC client connection pool dns resolver, when enabled vald uses ip handshake exclude dns discovery which improves network performance | -| defaults.grpc.client.connection_pool.enable_rebalance | bool | `true` | enables gRPC client connection pool rebalance | -| defaults.grpc.client.connection_pool.old_conn_close_duration | string | `"2m"` | makes delay before gRPC client connection closing during connection pool rebalance | -| defaults.grpc.client.connection_pool.rebalance_duration | string | `"30m"` | gRPC client connection pool rebalance duration | -| defaults.grpc.client.connection_pool.size | int | `3` | gRPC client connection pool size | -| defaults.grpc.client.dial_option.backoff_base_delay | string | `"1s"` | gRPC client dial option base backoff delay | -| defaults.grpc.client.dial_option.backoff_jitter | float | `0.2` | gRPC client dial option base backoff delay | -| defaults.grpc.client.dial_option.backoff_max_delay | string | `"120s"` | gRPC client dial option max backoff delay | -| defaults.grpc.client.dial_option.backoff_multiplier | float | `1.6` | gRPC client dial option base backoff delay | -| defaults.grpc.client.dial_option.enable_backoff | bool | `false` | gRPC client dial option backoff enabled | -| defaults.grpc.client.dial_option.initial_connection_window_size | int | `2097152` | gRPC client dial option initial connection window size | -| defaults.grpc.client.dial_option.initial_window_size | int | `1048576` | gRPC client dial option initial window size | -| defaults.grpc.client.dial_option.insecure | bool | `true` | gRPC client dial option insecure enabled | -| defaults.grpc.client.dial_option.interceptors | list | `[]` | gRPC client interceptors | -| defaults.grpc.client.dial_option.keepalive.permit_without_stream | bool | `false` | gRPC client keep alive permit without stream | -| defaults.grpc.client.dial_option.keepalive.time | string | `""` | gRPC client keep alive time | -| defaults.grpc.client.dial_option.keepalive.timeout | string | `"30s"` | gRPC client keep alive timeout | -| defaults.grpc.client.dial_option.max_msg_size | int | `0` | gRPC client dial option max message size | -| defaults.grpc.client.dial_option.min_connection_timeout | string | `"20s"` | gRPC client dial option minimum connection timeout | -| defaults.grpc.client.dial_option.net.dialer.dual_stack_enabled | bool | `true` | gRPC client TCP dialer dual stack enabled | -| defaults.grpc.client.dial_option.net.dialer.keepalive | string | `""` | gRPC client TCP dialer keep alive | -| defaults.grpc.client.dial_option.net.dialer.timeout | string | `""` | gRPC client TCP dialer timeout | -| defaults.grpc.client.dial_option.net.dns.cache_enabled | bool | `true` | gRPC client TCP DNS cache enabled | -| defaults.grpc.client.dial_option.net.dns.cache_expiration | string | `"1h"` | gRPC client TCP DNS cache expiration | -| defaults.grpc.client.dial_option.net.dns.refresh_duration | string | `"30m"` | gRPC client TCP DNS cache refresh duration | -| defaults.grpc.client.dial_option.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.grpc.client.dial_option.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.grpc.client.dial_option.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.grpc.client.dial_option.net.socket_option.reuse_port | bool | 
`true` | server listen socket option for reuse_port functionality | -| defaults.grpc.client.dial_option.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| defaults.grpc.client.dial_option.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.grpc.client.dial_option.net.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | -| defaults.grpc.client.dial_option.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | -| defaults.grpc.client.dial_option.net.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | -| defaults.grpc.client.dial_option.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| defaults.grpc.client.dial_option.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| defaults.grpc.client.dial_option.net.tls.enabled | bool | `false` | TLS enabled | -| defaults.grpc.client.dial_option.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| defaults.grpc.client.dial_option.net.tls.key | string | `"/path/to/key"` | TLS key path | -| defaults.grpc.client.dial_option.read_buffer_size | int | `0` | gRPC client dial option read buffer size | -| defaults.grpc.client.dial_option.timeout | string | `""` | gRPC client dial option timeout | -| defaults.grpc.client.dial_option.write_buffer_size | int | `0` | gRPC client dial option write buffer size | -| defaults.grpc.client.health_check_duration | string | `"1s"` | gRPC client health check duration | -| defaults.grpc.client.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| defaults.grpc.client.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| defaults.grpc.client.tls.enabled | bool | `false` | TLS enabled | -| defaults.grpc.client.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| defaults.grpc.client.tls.key | string | `"/path/to/key"` | TLS key path | -| defaults.image.tag | string | `"v1.7.13"` | docker image tag | -| defaults.logging.format | string | `"raw"` | logging format. logging format must be `raw` or `json` | -| defaults.logging.level | string | `"debug"` | logging level. logging level must be `debug`, `info`, `warn`, `error` or `fatal`. | -| defaults.logging.logger | string | `"glg"` | logger name. currently logger must be `glg` or `zap`. 
| -| defaults.networkPolicy.custom | object | `{"egress":[],"ingress":[]}` | custom network policies that a user can add | -| defaults.networkPolicy.custom.egress | list | `[]` | custom egress network policies that a user can add | -| defaults.networkPolicy.custom.ingress | list | `[]` | custom ingress network policies that a user can add | -| defaults.networkPolicy.enabled | bool | `false` | if network policy enabled | -| defaults.observability.enabled | bool | `false` | observability features enabled | -| defaults.observability.metrics.enable_cgo | bool | `true` | CGO metrics enabled | -| defaults.observability.metrics.enable_goroutine | bool | `true` | goroutine metrics enabled | -| defaults.observability.metrics.enable_memory | bool | `true` | memory metrics enabled | -| defaults.observability.metrics.enable_version_info | bool | `true` | version info metrics enabled | -| defaults.observability.metrics.version_info_labels | list | `["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","algorithm_info"]` | enabled label names of version info | -| defaults.observability.otlp.attribute | object | `{"namespace":"_MY_POD_NAMESPACE_","node_name":"_MY_NODE_NAME_","pod_name":"_MY_POD_NAME_","service_name":"vald"}` | default resource attribute | -| defaults.observability.otlp.attribute.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace | -| defaults.observability.otlp.attribute.node_name | string | `"_MY_NODE_NAME_"` | node name | -| defaults.observability.otlp.attribute.pod_name | string | `"_MY_POD_NAME_"` | pod name | -| defaults.observability.otlp.attribute.service_name | string | `"vald"` | service name | -| defaults.observability.otlp.collector_endpoint | string | `""` | OpenTelemetry Collector endpoint | -| defaults.observability.otlp.metrics_export_interval | string | `"1s"` | metrics export interval | -| defaults.observability.otlp.metrics_export_timeout | string | `"1m"` | metrics export timeout | -| defaults.observability.otlp.trace_batch_timeout | string | `"1s"` | trace batch timeout | -| defaults.observability.otlp.trace_export_timeout | string | `"1m"` | trace export timeout | -| defaults.observability.otlp.trace_max_export_batch_size | int | `1024` | trace maximum export batch size | -| defaults.observability.otlp.trace_max_queue_size | int | `256` | trace maximum queue size | -| defaults.observability.trace.enabled | bool | `false` | trace enabled | -| defaults.server_config.full_shutdown_duration | string | `"600s"` | server full shutdown duration | -| defaults.server_config.healths.liveness.enabled | bool | `true` | liveness server enabled | -| defaults.server_config.healths.liveness.host | string | `"0.0.0.0"` | liveness server host | -| defaults.server_config.healths.liveness.livenessProbe.failureThreshold | int | `2` | liveness probe failure threshold | -| defaults.server_config.healths.liveness.livenessProbe.httpGet.path | string | `"/liveness"` | liveness probe path | -| defaults.server_config.healths.liveness.livenessProbe.httpGet.port | string | `"liveness"` | liveness probe port | -| defaults.server_config.healths.liveness.livenessProbe.httpGet.scheme | string | `"HTTP"` | liveness probe scheme | -| defaults.server_config.healths.liveness.livenessProbe.initialDelaySeconds | int | `5` | liveness probe initial delay seconds | -| defaults.server_config.healths.liveness.livenessProbe.periodSeconds | int | `3` | liveness probe period seconds | -| defaults.server_config.healths.liveness.livenessProbe.successThreshold | int | `1` | 
liveness probe success threshold | -| defaults.server_config.healths.liveness.livenessProbe.timeoutSeconds | int | `2` | liveness probe timeout seconds | -| defaults.server_config.healths.liveness.port | int | `3000` | liveness server port | -| defaults.server_config.healths.liveness.server.http.handler_timeout | string | `""` | liveness server handler timeout | -| defaults.server_config.healths.liveness.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | -| defaults.server_config.healths.liveness.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. | -| defaults.server_config.healths.liveness.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | -| defaults.server_config.healths.liveness.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | -| defaults.server_config.healths.liveness.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | -| defaults.server_config.healths.liveness.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | -| defaults.server_config.healths.liveness.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | -| defaults.server_config.healths.liveness.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | -| defaults.server_config.healths.liveness.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | -| defaults.server_config.healths.liveness.server.http.idle_timeout | string | `""` | liveness server idle timeout | -| defaults.server_config.healths.liveness.server.http.read_header_timeout | string | `""` | liveness server read header timeout | -| defaults.server_config.healths.liveness.server.http.read_timeout | string | `""` | liveness server read timeout | -| defaults.server_config.healths.liveness.server.http.shutdown_duration | string | `"5s"` | liveness server shutdown duration | -| defaults.server_config.healths.liveness.server.http.write_timeout | string | `""` | liveness server write timeout | -| defaults.server_config.healths.liveness.server.mode | string | `"REST"` | liveness server mode | -| defaults.server_config.healths.liveness.server.network | string | `"tcp"` | network mode | -| defaults.server_config.healths.liveness.server.probe_wait_time | string | `"3s"` | liveness server probe wait time | -| defaults.server_config.healths.liveness.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| -| defaults.server_config.healths.liveness.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.server_config.healths.liveness.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.server_config.healths.liveness.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.server_config.healths.liveness.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| defaults.server_config.healths.liveness.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| defaults.server_config.healths.liveness.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.healths.liveness.server.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | -| defaults.server_config.healths.liveness.server.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | -| defaults.server_config.healths.liveness.server.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | -| defaults.server_config.healths.liveness.server.socket_path | string | `""` | server socket_path | -| defaults.server_config.healths.liveness.servicePort | int | `3000` | liveness server service port | -| defaults.server_config.healths.readiness.enabled | bool | `true` | readiness server enabled | -| defaults.server_config.healths.readiness.host | string | `"0.0.0.0"` | readiness server host | -| defaults.server_config.healths.readiness.port | int | `3001` | readiness server port | -| defaults.server_config.healths.readiness.readinessProbe.failureThreshold | int | `2` | readiness probe failure threshold | -| defaults.server_config.healths.readiness.readinessProbe.httpGet.path | string | `"/readiness"` | readiness probe path | -| defaults.server_config.healths.readiness.readinessProbe.httpGet.port | string | `"readiness"` | readiness probe port | -| defaults.server_config.healths.readiness.readinessProbe.httpGet.scheme | string | `"HTTP"` | readiness probe scheme | -| defaults.server_config.healths.readiness.readinessProbe.initialDelaySeconds | int | `10` | readiness probe initial delay seconds | -| defaults.server_config.healths.readiness.readinessProbe.periodSeconds | int | `3` | readiness probe period seconds | -| defaults.server_config.healths.readiness.readinessProbe.successThreshold | int | `1` | readiness probe success threshold | -| defaults.server_config.healths.readiness.readinessProbe.timeoutSeconds | int | `2` | readiness probe timeout seconds | -| defaults.server_config.healths.readiness.server.http.handler_timeout | string | `""` | readiness server handler timeout | -| defaults.server_config.healths.readiness.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | -| defaults.server_config.healths.readiness.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. | -| defaults.server_config.healths.readiness.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. 
| -| defaults.server_config.healths.readiness.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | -| defaults.server_config.healths.readiness.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | -| defaults.server_config.healths.readiness.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | -| defaults.server_config.healths.readiness.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | -| defaults.server_config.healths.readiness.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | -| defaults.server_config.healths.readiness.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | -| defaults.server_config.healths.readiness.server.http.idle_timeout | string | `""` | readiness server idle timeout | -| defaults.server_config.healths.readiness.server.http.read_header_timeout | string | `""` | readiness server read header timeout | -| defaults.server_config.healths.readiness.server.http.read_timeout | string | `""` | readiness server read timeout | -| defaults.server_config.healths.readiness.server.http.shutdown_duration | string | `"0s"` | readiness server shutdown duration | -| defaults.server_config.healths.readiness.server.http.write_timeout | string | `""` | readiness server write timeout | -| defaults.server_config.healths.readiness.server.mode | string | `"REST"` | readiness server mode | -| defaults.server_config.healths.readiness.server.network | string | `"tcp"` | network mode | -| defaults.server_config.healths.readiness.server.probe_wait_time | string | `"3s"` | readiness server probe wait time | -| defaults.server_config.healths.readiness.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| -| defaults.server_config.healths.readiness.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.server_config.healths.readiness.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.server_config.healths.readiness.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.server_config.healths.readiness.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| defaults.server_config.healths.readiness.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| defaults.server_config.healths.readiness.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open | bool | `true` | | -| defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | -| defaults.server_config.healths.readiness.server.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | -| defaults.server_config.healths.readiness.server.socket_path | string | `""` | server socket_path | -| defaults.server_config.healths.readiness.servicePort | int | `3001` | readiness server service port | -| defaults.server_config.healths.startup.enabled | bool | `true` | startup server enabled | -| defaults.server_config.healths.startup.port | int | `3000` | startup server port | -| defaults.server_config.healths.startup.startupProbe.failureThreshold | int | `30` | startup probe failure threshold | -| defaults.server_config.healths.startup.startupProbe.httpGet.path | string | `"/liveness"` | startup probe path | -| defaults.server_config.healths.startup.startupProbe.httpGet.port | string | `"liveness"` | startup probe port | -| defaults.server_config.healths.startup.startupProbe.httpGet.scheme | string | `"HTTP"` | startup probe scheme | -| defaults.server_config.healths.startup.startupProbe.initialDelaySeconds | int | `5` | startup probe initial delay seconds | -| defaults.server_config.healths.startup.startupProbe.periodSeconds | int | `5` | startup probe period seconds | -| defaults.server_config.healths.startup.startupProbe.successThreshold | int | `1` | startup probe success threshold | -| defaults.server_config.healths.startup.startupProbe.timeoutSeconds | int | `2` | startup probe timeout seconds | -| defaults.server_config.metrics.pprof.enabled | bool | `false` | pprof server enabled | -| defaults.server_config.metrics.pprof.host | string | `"0.0.0.0"` | pprof server host | -| defaults.server_config.metrics.pprof.port | int | `6060` | pprof server port | -| defaults.server_config.metrics.pprof.server.http.handler_timeout | string | `"5s"` | pprof server handler timeout | -| defaults.server_config.metrics.pprof.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | -| defaults.server_config.metrics.pprof.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. 
| -| defaults.server_config.metrics.pprof.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | -| defaults.server_config.metrics.pprof.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | -| defaults.server_config.metrics.pprof.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | -| defaults.server_config.metrics.pprof.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | -| defaults.server_config.metrics.pprof.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | -| defaults.server_config.metrics.pprof.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | -| defaults.server_config.metrics.pprof.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | -| defaults.server_config.metrics.pprof.server.http.idle_timeout | string | `"2s"` | pprof server idle timeout | -| defaults.server_config.metrics.pprof.server.http.read_header_timeout | string | `"1s"` | pprof server read header timeout | -| defaults.server_config.metrics.pprof.server.http.read_timeout | string | `"1s"` | pprof server read timeout | -| defaults.server_config.metrics.pprof.server.http.shutdown_duration | string | `"5s"` | pprof server shutdown duration | -| defaults.server_config.metrics.pprof.server.http.write_timeout | string | `"1m"` | pprof server write timeout | -| defaults.server_config.metrics.pprof.server.mode | string | `"REST"` | pprof server mode | -| defaults.server_config.metrics.pprof.server.network | string | `"tcp"` | network mode | -| defaults.server_config.metrics.pprof.server.probe_wait_time | string | `"3s"` | pprof server probe wait time | -| defaults.server_config.metrics.pprof.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| -| defaults.server_config.metrics.pprof.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.server_config.metrics.pprof.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.server_config.metrics.pprof.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.server_config.metrics.pprof.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| defaults.server_config.metrics.pprof.server.socket_option.tcp_cork | bool | `true` | server listen socket option for tcp_cork functionality | -| defaults.server_config.metrics.pprof.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.metrics.pprof.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | -| defaults.server_config.metrics.pprof.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | -| defaults.server_config.metrics.pprof.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | -| defaults.server_config.metrics.pprof.server.socket_path | string | `""` | server socket_path | -| defaults.server_config.metrics.pprof.servicePort | int | `6060` | pprof server service port | -| defaults.server_config.servers.grpc.enabled | bool | `true` | gRPC server enabled | -| defaults.server_config.servers.grpc.host | string | `"0.0.0.0"` | gRPC server host | -| defaults.server_config.servers.grpc.port | int | `8081` | gRPC server port | -| defaults.server_config.servers.grpc.server.grpc.bidirectional_stream_concurrency | int | `20` | gRPC server bidirectional stream concurrency | -| defaults.server_config.servers.grpc.server.grpc.connection_timeout | string | `""` | gRPC server connection timeout | -| defaults.server_config.servers.grpc.server.grpc.enable_admin | bool | `true` | gRPC server admin option | -| defaults.server_config.servers.grpc.server.grpc.enable_reflection | bool | `true` | gRPC server reflection option | -| defaults.server_config.servers.grpc.server.grpc.header_table_size | int | `0` | gRPC server header table size | -| defaults.server_config.servers.grpc.server.grpc.initial_conn_window_size | int | `2097152` | gRPC server initial connection window size | -| defaults.server_config.servers.grpc.server.grpc.initial_window_size | int | `1048576` | gRPC server initial window size | -| defaults.server_config.servers.grpc.server.grpc.interceptors | list | `["RecoverInterceptor"]` | gRPC server interceptors | -| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_age | string | `""` | gRPC server keep alive max connection age | -| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_age_grace | string | `""` | gRPC server keep alive max connection age grace | -| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_idle | string | `""` | gRPC server keep alive max connection idle | -| defaults.server_config.servers.grpc.server.grpc.keepalive.min_time | string | `"10m"` | gRPC server keep alive min_time | -| defaults.server_config.servers.grpc.server.grpc.keepalive.permit_without_stream | bool | `false` | gRPC server keep alive 
permit_without_stream | -| defaults.server_config.servers.grpc.server.grpc.keepalive.time | string | `"3h"` | gRPC server keep alive time | -| defaults.server_config.servers.grpc.server.grpc.keepalive.timeout | string | `"60s"` | gRPC server keep alive timeout | -| defaults.server_config.servers.grpc.server.grpc.max_header_list_size | int | `0` | gRPC server max header list size | -| defaults.server_config.servers.grpc.server.grpc.max_receive_message_size | int | `0` | gRPC server max receive message size | -| defaults.server_config.servers.grpc.server.grpc.max_send_message_size | int | `0` | gRPC server max send message size | -| defaults.server_config.servers.grpc.server.grpc.read_buffer_size | int | `0` | gRPC server read buffer size | -| defaults.server_config.servers.grpc.server.grpc.write_buffer_size | int | `0` | gRPC server write buffer size | -| defaults.server_config.servers.grpc.server.mode | string | `"GRPC"` | gRPC server server mode | -| defaults.server_config.servers.grpc.server.network | string | `"tcp"` | network mode | -| defaults.server_config.servers.grpc.server.probe_wait_time | string | `"3s"` | gRPC server probe wait time | -| defaults.server_config.servers.grpc.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. | -| defaults.server_config.servers.grpc.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.server_config.servers.grpc.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.server_config.servers.grpc.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.server_config.servers.grpc.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| defaults.server_config.servers.grpc.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| defaults.server_config.servers.grpc.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.servers.grpc.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | -| defaults.server_config.servers.grpc.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | -| defaults.server_config.servers.grpc.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | -| defaults.server_config.servers.grpc.server.socket_path | string | `""` | server socket_path | -| defaults.server_config.servers.grpc.servicePort | int | `8081` | gRPC server service port | -| defaults.server_config.servers.rest.enabled | bool | `false` | REST server enabled | -| defaults.server_config.servers.rest.host | string | `"0.0.0.0"` | REST server host | -| defaults.server_config.servers.rest.port | int | `8080` | REST server port | -| defaults.server_config.servers.rest.server.http.handler_timeout | string | `"5s"` | REST server handler timeout | -| defaults.server_config.servers.rest.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | -| defaults.server_config.servers.rest.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP 
goroutines which may run at a time over all connections. Negative or zero no limit. | -| defaults.server_config.servers.rest.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | -| defaults.server_config.servers.rest.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | -| defaults.server_config.servers.rest.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | -| defaults.server_config.servers.rest.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | -| defaults.server_config.servers.rest.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | -| defaults.server_config.servers.rest.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | -| defaults.server_config.servers.rest.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | -| defaults.server_config.servers.rest.server.http.idle_timeout | string | `"2s"` | REST server idle timeout | -| defaults.server_config.servers.rest.server.http.read_header_timeout | string | `"1s"` | REST server read header timeout | -| defaults.server_config.servers.rest.server.http.read_timeout | string | `"1s"` | REST server read timeout | -| defaults.server_config.servers.rest.server.http.shutdown_duration | string | `"5s"` | REST server shutdown duration | -| defaults.server_config.servers.rest.server.http.write_timeout | string | `"1s"` | REST server write timeout | -| defaults.server_config.servers.rest.server.mode | string | `"REST"` | REST server server mode | -| defaults.server_config.servers.rest.server.network | string | `"tcp"` | network mode | -| defaults.server_config.servers.rest.server.probe_wait_time | string | `"3s"` | REST server probe wait time | -| defaults.server_config.servers.rest.server.restart | bool | `true` | | -| defaults.server_config.servers.rest.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| defaults.server_config.servers.rest.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| defaults.server_config.servers.rest.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| defaults.server_config.servers.rest.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| defaults.server_config.servers.rest.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| defaults.server_config.servers.rest.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| defaults.server_config.servers.rest.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | -| defaults.server_config.servers.rest.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for 
tcp_no_delay functionality | -| defaults.server_config.servers.rest.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | -| defaults.server_config.servers.rest.server.socket_path | string | `""` | network socket_path | -| defaults.server_config.servers.rest.servicePort | int | `8080` | REST server service port | -| defaults.server_config.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| defaults.server_config.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| defaults.server_config.tls.enabled | bool | `false` | TLS enabled | -| defaults.server_config.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| defaults.server_config.tls.key | string | `"/path/to/key"` | TLS key path | -| defaults.time_zone | string | `"UTC"` | Time zone | -| discoverer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| discoverer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| discoverer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| discoverer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| discoverer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-discoverer"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | -| discoverer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| discoverer.annotations | object | `{}` | deployment annotations | -| discoverer.clusterRole.enabled | bool | `true` | creates clusterRole resource | -| discoverer.clusterRole.name | string | `"discoverer"` | name of clusterRole | -| discoverer.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | -| discoverer.clusterRoleBinding.name | string | `"discoverer"` | name of clusterRoleBinding | -| discoverer.discoverer.discovery_duration | string | `"3s"` | duration to discovery | -| discoverer.discoverer.name | string | `""` | name to discovery | -| discoverer.discoverer.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | -| discoverer.discoverer.net.dialer.dual_stack_enabled | bool | `false` | TCP dialer dual stack enabled | -| discoverer.discoverer.net.dialer.keepalive | string | `"10m"` | TCP dialer keep alive | -| discoverer.discoverer.net.dialer.timeout | string | `"30s"` | TCP dialer timeout | -| discoverer.discoverer.net.dns.cache_enabled | bool | `true` | TCP DNS cache enabled | -| discoverer.discoverer.net.dns.cache_expiration | string | `"24h"` | TCP DNS cache expiration | -| discoverer.discoverer.net.dns.refresh_duration | string | `"5m"` | TCP DNS cache refresh duration | -| discoverer.discoverer.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| discoverer.discoverer.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| discoverer.discoverer.net.socket_option.reuse_addr | bool | `true` | server listen 
socket option for reuse_addr functionality | -| discoverer.discoverer.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| discoverer.discoverer.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| discoverer.discoverer.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | -| discoverer.discoverer.net.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | -| discoverer.discoverer.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | -| discoverer.discoverer.net.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | -| discoverer.discoverer.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| discoverer.discoverer.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| discoverer.discoverer.net.tls.enabled | bool | `false` | TLS enabled | -| discoverer.discoverer.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| discoverer.discoverer.net.tls.key | string | `"/path/to/key"` | TLS key path | -| discoverer.discoverer.selectors | object | `{"node":{"fields":{},"labels":{}},"node_metrics":{"fields":{},"labels":{}},"pod":{"fields":{},"labels":{}},"pod_metrics":{"fields":{},"labels":{}},"service":{"fields":{},"labels":{}}}` | k8s resource selectors | -| discoverer.discoverer.selectors.node | object | `{"fields":{},"labels":{}}` | k8s resource selectors for node discovery | -| discoverer.discoverer.selectors.node.fields | object | `{}` | k8s field selectors for node discovery | -| discoverer.discoverer.selectors.node.labels | object | `{}` | k8s label selectors for node discovery | -| discoverer.discoverer.selectors.node_metrics | object | `{"fields":{},"labels":{}}` | k8s resource selectors for node_metrics discovery | -| discoverer.discoverer.selectors.node_metrics.fields | object | `{}` | k8s field selectors for node_metrics discovery | -| discoverer.discoverer.selectors.node_metrics.labels | object | `{}` | k8s label selectors for node_metrics discovery | -| discoverer.discoverer.selectors.pod | object | `{"fields":{},"labels":{}}` | k8s resource selectors for pod discovery | -| discoverer.discoverer.selectors.pod.fields | object | `{}` | k8s field selectors for pod discovery | -| discoverer.discoverer.selectors.pod.labels | object | `{}` | k8s label selectors for pod discovery | -| discoverer.discoverer.selectors.pod_metrics | object | `{"fields":{},"labels":{}}` | k8s resource selectors for pod_metrics discovery | -| discoverer.discoverer.selectors.pod_metrics.fields | object | `{}` | k8s field selectors for pod_metrics discovery | -| discoverer.discoverer.selectors.pod_metrics.labels | object | `{}` | k8s label selectors for pod_metrics discovery | -| discoverer.discoverer.selectors.service | object | `{"fields":{},"labels":{}}` | k8s resource selectors for service discovery | -| discoverer.discoverer.selectors.service.fields | object | `{}` | k8s field selectors for service discovery | -| discoverer.discoverer.selectors.service.labels | object | `{}` | k8s label selectors for service discovery | -| discoverer.enabled | bool | `true` | discoverer enabled | -| discoverer.env | list | 
`[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| discoverer.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| discoverer.hpa.enabled | bool | `false` | HPA enabled | -| discoverer.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| discoverer.image.pullPolicy | string | `"Always"` | image pull policy | -| discoverer.image.repository | string | `"vdaas/vald-discoverer-k8s"` | image repository | -| discoverer.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| discoverer.initContainers | list | `[]` | init containers | -| discoverer.internalTrafficPolicy | string | `""` | internal traffic policy : Cluster or Local | -| discoverer.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| discoverer.logging | object | `{}` | logging config (overrides defaults.logging) | -| discoverer.maxReplicas | int | `2` | maximum number of replicas. if HPA is disabled, this value will be ignored. | -| discoverer.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | -| discoverer.minReplicas | int | `1` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | -| discoverer.name | string | `"vald-discoverer"` | name of discoverer deployment | -| discoverer.nodeName | string | `""` | node name | -| discoverer.nodeSelector | object | `{}` | node selector | -| discoverer.observability | object | `{"otlp":{"attribute":{"service_name":"vald-discoverer"}}}` | observability config (overrides defaults.observability) | -| discoverer.podAnnotations | object | `{}` | pod annotations | -| discoverer.podPriority.enabled | bool | `true` | discoverer pod PriorityClass enabled | -| discoverer.podPriority.value | int | `1000000` | discoverer pod PriorityClass value | -| discoverer.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| discoverer.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| discoverer.resources | object | `{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}}` | compute resources | -| discoverer.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| discoverer.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| discoverer.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| discoverer.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| discoverer.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| discoverer.service.annotations | object | `{}` | service annotations | -| discoverer.service.labels | object | `{}` | service labels | -| discoverer.serviceAccount.enabled | bool | `true` | creates service account | -| discoverer.serviceAccount.name | string | 
`"vald"` | name of service account | -| discoverer.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| discoverer.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| discoverer.time_zone | string | `""` | Time zone | -| discoverer.tolerations | list | `[]` | tolerations | -| discoverer.topologySpreadConstraints | list | `[]` | topology spread constraints of discoverer pods | -| discoverer.version | string | `"v0.0.0"` | version of discoverer config | -| discoverer.volumeMounts | list | `[]` | volume mounts | -| discoverer.volumes | list | `[]` | volumes | -| gateway.filter.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| gateway.filter.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| gateway.filter.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| gateway.filter.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| gateway.filter.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-filter-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | -| gateway.filter.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| gateway.filter.annotations | object | `{}` | deployment annotations | -| gateway.filter.enabled | bool | `false` | gateway enabled | -| gateway.filter.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| gateway.filter.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.filter.gateway_config.egress_filter | object | `{"client":{},"distance_filters":[],"object_filters":[]}` | gRPC client config for egress filter | -| gateway.filter.gateway_config.egress_filter.client | object | `{}` | gRPC client config for egress filter (overrides defaults.grpc.client) | -| gateway.filter.gateway_config.egress_filter.distance_filters | list | `[]` | distance egress vector filter targets | -| gateway.filter.gateway_config.egress_filter.object_filters | list | `[]` | object egress vector filter targets | -| gateway.filter.gateway_config.gateway_client | object | `{}` | gRPC client for next gateway (overrides defaults.grpc.client) | -| gateway.filter.gateway_config.ingress_filter | object | `{"client":{},"insert_filters":[],"search_filters":[],"update_filters":[],"upsert_filters":[],"vectorizer":""}` | gRPC client config for ingress filter | -| gateway.filter.gateway_config.ingress_filter.client | object | `{}` | gRPC client for ingress filter (overrides defaults.grpc.client) | -| gateway.filter.gateway_config.ingress_filter.insert_filters | list | `[]` | insert ingress vector filter targets | -| gateway.filter.gateway_config.ingress_filter.search_filters | list | 
`[]` | search ingress vector filter targets | -| gateway.filter.gateway_config.ingress_filter.update_filters | list | `[]` | update ingress vector filter targets | -| gateway.filter.gateway_config.ingress_filter.upsert_filters | list | `[]` | upsert ingress vector filter targets | -| gateway.filter.gateway_config.ingress_filter.vectorizer | string | `""` | object ingress vectorize filter targets | -| gateway.filter.hpa.enabled | bool | `true` | HPA enabled | -| gateway.filter.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| gateway.filter.image.pullPolicy | string | `"Always"` | image pull policy | -| gateway.filter.image.repository | string | `"vdaas/vald-filter-gateway"` | image repository | -| gateway.filter.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| gateway.filter.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | -| gateway.filter.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | -| gateway.filter.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | -| gateway.filter.ingress.enabled | bool | `false` | gateway ingress enabled | -| gateway.filter.ingress.host | string | `"filter.gateway.vald.vdaas.org"` | ingress hostname | -| gateway.filter.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | -| gateway.filter.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | -| gateway.filter.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-gateway-lb","sleepDuration":2,"target":"gateway-lb","type":"wait-for"}]` | init containers | -| gateway.filter.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.filter.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| gateway.filter.logging | object | `{}` | logging config (overrides defaults.logging) | -| gateway.filter.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. | -| gateway.filter.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | -| gateway.filter.minReplicas | int | `3` | minimum number of replicas. 
if HPA is disabled, the replicas will be set to this value | -| gateway.filter.name | string | `"vald-filter-gateway"` | name of filter gateway deployment | -| gateway.filter.nodeName | string | `""` | node name | -| gateway.filter.nodeSelector | object | `{}` | node selector | -| gateway.filter.observability | object | `{"otlp":{"attribute":{"service_name":"vald-filter-gateway"}}}` | observability config (overrides defaults.observability) | -| gateway.filter.podAnnotations | object | `{}` | pod annotations | -| gateway.filter.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | -| gateway.filter.podPriority.value | int | `1000000` | gateway pod PriorityClass value | -| gateway.filter.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| gateway.filter.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| gateway.filter.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | -| gateway.filter.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| gateway.filter.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| gateway.filter.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| gateway.filter.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| gateway.filter.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| gateway.filter.service.annotations | object | `{}` | service annotations | -| gateway.filter.service.labels | object | `{}` | service labels | -| gateway.filter.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| gateway.filter.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| gateway.filter.time_zone | string | `""` | Time zone | -| gateway.filter.tolerations | list | `[]` | tolerations | -| gateway.filter.topologySpreadConstraints | list | `[]` | topology spread constraints of gateway pods | -| gateway.filter.version | string | `"v0.0.0"` | version of gateway config | -| gateway.filter.volumeMounts | list | `[]` | volume mounts | -| gateway.filter.volumes | list | `[]` | volumes | -| gateway.lb.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| gateway.lb.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| gateway.lb.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| gateway.lb.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| gateway.lb.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-lb-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity 
preferred scheduling terms | -| gateway.lb.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| gateway.lb.annotations | object | `{}` | deployment annotations | -| gateway.lb.enabled | bool | `true` | gateway enabled | -| gateway.lb.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| gateway.lb.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.lb.gateway_config.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | agent namespace | -| gateway.lb.gateway_config.discoverer.agent_client_options | object | `{}` | gRPC client options for agents (overrides defaults.grpc.client) | -| gateway.lb.gateway_config.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| gateway.lb.gateway_config.discoverer.duration | string | `"200ms"` | | -| gateway.lb.gateway_config.discoverer.read_client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| gateway.lb.gateway_config.index_replica | int | `3` | number of index replica | -| gateway.lb.gateway_config.multi_operation_concurrency | int | `20` | number of concurrency of multiXXX api's operation | -| gateway.lb.gateway_config.node_name | string | `""` | node name | -| gateway.lb.hpa.enabled | bool | `true` | HPA enabled | -| gateway.lb.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| gateway.lb.image.pullPolicy | string | `"Always"` | image pull policy | -| gateway.lb.image.repository | string | `"vdaas/vald-lb-gateway"` | image repository | -| gateway.lb.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| gateway.lb.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | -| gateway.lb.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | -| gateway.lb.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | -| gateway.lb.ingress.enabled | bool | `false` | gateway ingress enabled | -| gateway.lb.ingress.host | string | `"lb.gateway.vald.vdaas.org"` | ingress hostname | -| gateway.lb.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | -| gateway.lb.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | -| gateway.lb.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"},{"image":"busybox:stable","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"}]` | init containers | -| gateway.lb.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.lb.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| gateway.lb.logging | object | `{}` | logging config (overrides defaults.logging) | -| gateway.lb.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. 
| -| gateway.lb.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | -| gateway.lb.minReplicas | int | `3` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | -| gateway.lb.name | string | `"vald-lb-gateway"` | name of gateway deployment | -| gateway.lb.nodeName | string | `""` | node name | -| gateway.lb.nodeSelector | object | `{}` | node selector | -| gateway.lb.observability | object | `{"otlp":{"attribute":{"service_name":"vald-lb-gateway"}}}` | observability config (overrides defaults.observability) | -| gateway.lb.podAnnotations | object | `{}` | pod annotations | -| gateway.lb.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | -| gateway.lb.podPriority.value | int | `1000000` | gateway pod PriorityClass value | -| gateway.lb.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| gateway.lb.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| gateway.lb.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | -| gateway.lb.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| gateway.lb.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| gateway.lb.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| gateway.lb.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| gateway.lb.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| gateway.lb.service.annotations | object | `{}` | service annotations | -| gateway.lb.service.labels | object | `{}` | service labels | -| gateway.lb.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| gateway.lb.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| gateway.lb.time_zone | string | `""` | Time zone | -| gateway.lb.tolerations | list | `[]` | tolerations | -| gateway.lb.topologySpreadConstraints | list | `[]` | topology spread constraints of gateway pods | -| gateway.lb.version | string | `"v0.0.0"` | version of gateway config | -| gateway.lb.volumeMounts | list | `[]` | volume mounts | -| gateway.lb.volumes | list | `[]` | volumes | -| gateway.mirror.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| gateway.mirror.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| gateway.mirror.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| gateway.mirror.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| gateway.mirror.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | 
`[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-mirror-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | -| gateway.mirror.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| gateway.mirror.annotations | object | `{}` | deployment annotations | -| gateway.mirror.clusterRole.enabled | bool | `true` | creates clusterRole resource | -| gateway.mirror.clusterRole.name | string | `"gateway-mirror"` | name of clusterRole | -| gateway.mirror.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | -| gateway.mirror.clusterRoleBinding.name | string | `"gateway-mirror"` | name of clusterRoleBinding | -| gateway.mirror.enabled | bool | `false` | gateway enabled | -| gateway.mirror.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| gateway.mirror.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.mirror.gateway_config.client | object | `{}` | gRPC client (overrides defaults.grpc.client) | -| gateway.mirror.gateway_config.colocation | string | `"dc1"` | colocation name | -| gateway.mirror.gateway_config.discovery_duration | string | `"1s"` | duration to discovery | -| gateway.mirror.gateway_config.gateway_addr | string | `""` | address for lb-gateway | -| gateway.mirror.gateway_config.group | string | `""` | mirror group name | -| gateway.mirror.gateway_config.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | -| gateway.mirror.gateway_config.net.dialer.dual_stack_enabled | bool | `false` | TCP dialer dual stack enabled | -| gateway.mirror.gateway_config.net.dialer.keepalive | string | `"10m"` | TCP dialer keep alive | -| gateway.mirror.gateway_config.net.dialer.timeout | string | `"30s"` | TCP dialer timeout | -| gateway.mirror.gateway_config.net.dns.cache_enabled | bool | `true` | TCP DNS cache enabled | -| gateway.mirror.gateway_config.net.dns.cache_expiration | string | `"24h"` | TCP DNS cache expiration | -| gateway.mirror.gateway_config.net.dns.refresh_duration | string | `"5m"` | TCP DNS cache refresh duration | -| gateway.mirror.gateway_config.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | -| gateway.mirror.gateway_config.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | -| gateway.mirror.gateway_config.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | -| gateway.mirror.gateway_config.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | -| gateway.mirror.gateway_config.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | -| gateway.mirror.gateway_config.net.socket_option.tcp_defer_accept | bool | `true` | server listen socket option for tcp_defer_accept functionality | -| gateway.mirror.gateway_config.net.socket_option.tcp_fast_open | bool | `true` | server listen socket option for 
tcp_fast_open functionality | -| gateway.mirror.gateway_config.net.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | -| gateway.mirror.gateway_config.net.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | -| gateway.mirror.gateway_config.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | -| gateway.mirror.gateway_config.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | -| gateway.mirror.gateway_config.net.tls.enabled | bool | `false` | TLS enabled | -| gateway.mirror.gateway_config.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | -| gateway.mirror.gateway_config.net.tls.key | string | `"/path/to/key"` | TLS key path | -| gateway.mirror.gateway_config.pod_name | string | `"_MY_POD_NAME_"` | self mirror gateway pod name | -| gateway.mirror.gateway_config.register_duration | string | `"1s"` | duration to register mirror-gateway. | -| gateway.mirror.gateway_config.self_mirror_addr | string | `""` | address for self mirror-gateway | -| gateway.mirror.hpa.enabled | bool | `true` | HPA enabled | -| gateway.mirror.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | -| gateway.mirror.image.pullPolicy | string | `"Always"` | image pull policy | -| gateway.mirror.image.repository | string | `"vdaas/vald-mirror-gateway"` | image repository | -| gateway.mirror.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| gateway.mirror.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | -| gateway.mirror.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | -| gateway.mirror.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | -| gateway.mirror.ingress.enabled | bool | `false` | gateway ingress enabled | -| gateway.mirror.ingress.host | string | `"mirror.gateway.vald.vdaas.org"` | ingress hostname | -| gateway.mirror.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | -| gateway.mirror.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | -| gateway.mirror.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-gateway-lb","sleepDuration":2,"target":"gateway-lb","type":"wait-for"}]` | init containers | -| gateway.mirror.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| gateway.mirror.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| gateway.mirror.logging | object | `{}` | logging config (overrides defaults.logging) | -| gateway.mirror.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. | -| gateway.mirror.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | -| gateway.mirror.minReplicas | int | `3` | minimum number of replicas. 
if HPA is disabled, the replicas will be set to this value | -| gateway.mirror.name | string | `"vald-mirror-gateway"` | name of gateway deployment | -| gateway.mirror.nodeName | string | `""` | node name | -| gateway.mirror.nodeSelector | object | `{}` | node selector | -| gateway.mirror.observability | object | `{"otlp":{"attribute":{"service_name":"vald-mirror-gateway"}}}` | observability config (overrides defaults.observability) | -| gateway.mirror.podAnnotations | object | `{}` | pod annotations | -| gateway.mirror.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | -| gateway.mirror.podPriority.value | int | `1000000` | gateway pod PriorityClass value | -| gateway.mirror.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| gateway.mirror.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| gateway.mirror.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | -| gateway.mirror.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| gateway.mirror.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| gateway.mirror.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| gateway.mirror.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| gateway.mirror.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| gateway.mirror.service.annotations | object | `{}` | service annotations | -| gateway.mirror.service.labels | object | `{}` | service labels | -| gateway.mirror.serviceAccount.enabled | bool | `true` | creates service account | -| gateway.mirror.serviceAccount.name | string | `"gateway-mirror"` | name of service account | -| gateway.mirror.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| gateway.mirror.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| gateway.mirror.time_zone | string | `""` | Time zone | -| gateway.mirror.tolerations | list | `[]` | tolerations | -| gateway.mirror.topologySpreadConstraints | list | `[]` | topology spread constraints of gateway pods | -| gateway.mirror.version | string | `"v0.0.0"` | version of gateway config | -| gateway.mirror.volumeMounts | list | `[]` | volume mounts | -| gateway.mirror.volumes | list | `[]` | volumes | -| manager.index.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| manager.index.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| manager.index.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| manager.index.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| manager.index.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | 
pod anti-affinity preferred scheduling terms | -| manager.index.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| manager.index.annotations | object | `{}` | deployment annotations | -| manager.index.corrector.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | -| manager.index.corrector.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | -| manager.index.corrector.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| manager.index.corrector.discoverer.duration | string | `"500ms"` | refresh duration to discover | -| manager.index.corrector.enabled | bool | `false` | enable index correction CronJob | -| manager.index.corrector.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.corrector.gateway | object | `{}` | gRPC client for gateway (overrides defaults.grpc.client) | -| manager.index.corrector.image.pullPolicy | string | `"Always"` | | -| manager.index.corrector.image.repository | string | `"vdaas/vald-index-correction"` | image repository | -| manager.index.corrector.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.corrector.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | -| manager.index.corrector.kvs_background_compaction_interval | string | `"5s"` | interval of checked id list kvs compaction | -| manager.index.corrector.kvs_background_sync_interval | string | `"5s"` | interval of checked id list kvs sync | -| manager.index.corrector.name | string | `"vald-index-correction"` | name of index correction job | -| manager.index.corrector.node_name | string | `""` | node name | -| manager.index.corrector.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-correction"}}}` | observability config (overrides defaults.observability) | -| manager.index.corrector.schedule | string | `"6 3 * * *"` | CronJob schedule setting for index correction | -| manager.index.corrector.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.corrector.startingDeadlineSeconds | int | `86400` | startingDeadlineSeconds setting for K8s completed jobs | -| manager.index.corrector.stream_list_concurrency | int | `200` | concurrency for stream list object rpc | -| manager.index.corrector.suspend | bool | `false` | CronJob suspend setting for index correction | -| manager.index.corrector.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | -| manager.index.corrector.version | string | `"v0.0.0"` | version of index manager config | -| manager.index.creator.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | -| manager.index.creator.concurrency | int | `1` | concurrency for indexing | -| 
manager.index.creator.creation_pool_size | int | `16` | number of pool size of create index processing | -| manager.index.creator.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | -| manager.index.creator.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| manager.index.creator.discoverer.duration | string | `"500ms"` | refresh duration to discover | -| manager.index.creator.enabled | bool | `false` | enable index creation CronJob | -| manager.index.creator.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.creator.image.pullPolicy | string | `"Always"` | | -| manager.index.creator.image.repository | string | `"vdaas/vald-index-creation"` | image repository | -| manager.index.creator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.creator.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | -| manager.index.creator.name | string | `"vald-index-creation"` | name of index creation job | -| manager.index.creator.node_name | string | `""` | node name | -| manager.index.creator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-creation"}}}` | observability config (overrides defaults.observability) | -| manager.index.creator.schedule | string | `"* * * * *"` | CronJob schedule setting for index creation | -| manager.index.creator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.creator.startingDeadlineSeconds | int | `43200` | startingDeadlineSeconds setting for K8s completed jobs | -| manager.index.creator.suspend | bool | `false` | CronJob suspend setting for index creation | -| manager.index.creator.target_addrs | list | `[]` | indexing target addresses | -| manager.index.creator.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | -| manager.index.creator.version | string | `"v0.0.0"` | version of index manager config | -| manager.index.enabled | bool | `true` | index manager enabled | -| manager.index.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | -| manager.index.image.pullPolicy | string | `"Always"` | image pull policy | -| manager.index.image.repository | string | `"vdaas/vald-manager-index"` | image repository | -| manager.index.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.indexer.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | -| 
manager.index.indexer.auto_index_check_duration | string | `"1m"` | check duration of automatic indexing | -| manager.index.indexer.auto_index_duration_limit | string | `"30m"` | limit duration of automatic indexing | -| manager.index.indexer.auto_index_length | int | `100` | number of cache to trigger automatic indexing | -| manager.index.indexer.auto_save_index_duration_limit | string | `"3h"` | limit duration of automatic index saving | -| manager.index.indexer.auto_save_index_wait_duration | string | `"10m"` | duration of automatic index saving wait duration for next saving | -| manager.index.indexer.concurrency | int | `1` | concurrency | -| manager.index.indexer.creation_pool_size | int | `16` | number of pool size of create index processing | -| manager.index.indexer.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | -| manager.index.indexer.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| manager.index.indexer.discoverer.duration | string | `"500ms"` | refresh duration to discover | -| manager.index.indexer.node_name | string | `""` | node name | -| manager.index.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | -| manager.index.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| manager.index.logging | object | `{}` | logging config (overrides defaults.logging) | -| manager.index.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | -| manager.index.name | string | `"vald-manager-index"` | name of index manager deployment | -| manager.index.nodeName | string | `""` | node name | -| manager.index.nodeSelector | object | `{}` | node selector | -| manager.index.observability | object | `{"otlp":{"attribute":{"service_name":"vald-manager-index"}}}` | observability config (overrides defaults.observability) | -| manager.index.operator | object | 
`{"affinity":{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[],"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[]}},"podAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[],"requiredDuringSchedulingIgnoredDuringExecution":[]},"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-index-operator"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}],"requiredDuringSchedulingIgnoredDuringExecution":[]}},"annotations":{},"enabled":false,"env":[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"image":{"pullPolicy":"Always","repository":"vdaas/vald-index-operator","tag":""},"initContainers":[],"kind":"Deployment","logging":{},"name":"vald-index-operator","namespace":"_MY_POD_NAMESPACE_","nodeName":"","nodeSelector":{},"observability":{"otlp":{"attribute":{"service_name":"vald-index-operator"}}},"podAnnotations":{},"podPriority":{"enabled":true,"value":1000000},"podSecurityContext":{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"progressDeadlineSeconds":600,"replicas":1,"resources":{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}},"revisionHistoryLimit":2,"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"rotation_job_concurrency":2,"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"server_config":{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}},"terminationGracePeriodSeconds":30,"time_zone":"","tolerations":[],"topologySpreadConstraints":[],"version":"v0.0.0","volumeMounts":[],"volumes":[]}` | [THIS FEATURE IS WIP] operator that manages vald index | -| manager.index.operator.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | -| manager.index.operator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | -| manager.index.operator.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | -| manager.index.operator.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | -| manager.index.operator.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-index-operator"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | -| manager.index.operator.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | -| manager.index.operator.annotations | object | `{}` | deployment annotations | -| manager.index.operator.enabled | bool | `false` | index operator enabled | -| manager.index.operator.env | list | 
`[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.operator.image.pullPolicy | string | `"Always"` | image pull policy | -| manager.index.operator.image.repository | string | `"vdaas/vald-index-operator"` | image repository | -| manager.index.operator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.operator.initContainers | list | `[]` | init containers | -| manager.index.operator.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | -| manager.index.operator.logging | object | `{}` | logging config (overrides defaults.logging) | -| manager.index.operator.name | string | `"vald-index-operator"` | name of manager.index.operator deployment | -| manager.index.operator.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | -| manager.index.operator.nodeName | string | `""` | node name | -| manager.index.operator.nodeSelector | object | `{}` | node selector | -| manager.index.operator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-operator"}}}` | observability config (overrides defaults.observability) | -| manager.index.operator.podAnnotations | object | `{}` | pod annotations | -| manager.index.operator.podPriority.enabled | bool | `true` | manager.index.operator pod PriorityClass enabled | -| manager.index.operator.podPriority.value | int | `1000000` | manager.index.operator pod PriorityClass value | -| manager.index.operator.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| manager.index.operator.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| manager.index.operator.replicas | int | `1` | number of replicas. | -| manager.index.operator.resources | object | `{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}}` | compute resources | -| manager.index.operator.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| manager.index.operator.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| manager.index.operator.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| manager.index.operator.rotation_job_concurrency | int | `2` | maximum concurrent rotator job run. 
| -| manager.index.operator.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| manager.index.operator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.operator.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| manager.index.operator.time_zone | string | `""` | Time zone | -| manager.index.operator.tolerations | list | `[]` | tolerations | -| manager.index.operator.topologySpreadConstraints | list | `[]` | topology spread constraints of manager.index.operator pods | -| manager.index.operator.version | string | `"v0.0.0"` | version of index operator config | -| manager.index.operator.volumeMounts | list | `[]` | volume mounts | -| manager.index.operator.volumes | list | `[]` | volumes | -| manager.index.podAnnotations | object | `{}` | pod annotations | -| manager.index.podPriority.enabled | bool | `true` | index manager pod PriorityClass enabled | -| manager.index.podPriority.value | int | `1000000` | index manager pod PriorityClass value | -| manager.index.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| manager.index.progressDeadlineSeconds | int | `600` | progress deadline seconds | -| manager.index.readreplica.rotator | object | `{"agent_namespace":"_MY_POD_NAMESPACE_","clusterRole":{"enabled":true,"name":"vald-readreplica-rotate"},"clusterRoleBinding":{"enabled":true,"name":"vald-readreplica-rotate"},"env":[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"image":{"pullPolicy":"Always","repository":"vdaas/vald-readreplica-rotate","tag":""},"initContainers":[],"name":"vald-readreplica-rotate","observability":{"otlp":{"attribute":{"service_name":"vald-readreplica-rotate"}}},"podSecurityContext":{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"server_config":{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}},"serviceAccount":{"enabled":true,"name":"vald-readreplica-rotate"},"target_read_replica_id_annotations_key":"vald.vdaas.org/target-read-replica-id","ttlSecondsAfterFinished":86400,"version":"v0.0.0"}` | [This feature is work in progress] readreplica agents rotation job | -| manager.index.readreplica.rotator.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | -| manager.index.readreplica.rotator.clusterRole.enabled | bool | `true` | creates clusterRole resource | -| manager.index.readreplica.rotator.clusterRole.name | string | `"vald-readreplica-rotate"` | name of clusterRole | -| manager.index.readreplica.rotator.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | -| 
manager.index.readreplica.rotator.clusterRoleBinding.name | string | `"vald-readreplica-rotate"` | name of clusterRoleBinding | -| manager.index.readreplica.rotator.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.readreplica.rotator.image.repository | string | `"vdaas/vald-readreplica-rotate"` | image repository | -| manager.index.readreplica.rotator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.readreplica.rotator.initContainers | list | `[]` | init containers | -| manager.index.readreplica.rotator.name | string | `"vald-readreplica-rotate"` | name of readreplica rotator job | -| manager.index.readreplica.rotator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-readreplica-rotate"}}}` | observability config (overrides defaults.observability) | -| manager.index.readreplica.rotator.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | -| manager.index.readreplica.rotator.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| manager.index.readreplica.rotator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.readreplica.rotator.serviceAccount.enabled | bool | `true` | creates service account | -| manager.index.readreplica.rotator.serviceAccount.name | string | `"vald-readreplica-rotate"` | name of service account | -| manager.index.readreplica.rotator.target_read_replica_id_annotations_key | string | `"vald.vdaas.org/target-read-replica-id"` | name of annotations key for target read replica id | -| manager.index.readreplica.rotator.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | -| manager.index.readreplica.rotator.version | string | `"v0.0.0"` | version of readreplica rotator config | -| manager.index.replicas | int | `1` | number of replicas | -| manager.index.resources | object | `{"limits":{"cpu":"1000m","memory":"500Mi"},"requests":{"cpu":"200m","memory":"80Mi"}}` | compute resources | -| manager.index.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | -| manager.index.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | -| manager.index.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | -| manager.index.saver.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | -| manager.index.saver.concurrency | int | `1` | concurrency for index saving | -| manager.index.saver.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | -| manager.index.saver.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | -| manager.index.saver.discoverer.duration | string | `"500ms"` | refresh duration to discover | -| 
manager.index.saver.enabled | bool | `false` | enable index save CronJob | -| manager.index.saver.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | -| manager.index.saver.image.pullPolicy | string | `"Always"` | | -| manager.index.saver.image.repository | string | `"vdaas/vald-index-save"` | image repository | -| manager.index.saver.image.tag | string | `""` | image tag (overrides defaults.image.tag) | -| manager.index.saver.initContainers | list | `[{"image":"busybox:stable","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | -| manager.index.saver.name | string | `"vald-index-save"` | name of index save job | -| manager.index.saver.node_name | string | `""` | node name | -| manager.index.saver.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-save"}}}` | observability config (overrides defaults.observability) | -| manager.index.saver.schedule | string | `"0 */3 * * *"` | CronJob schedule setting for index save | -| manager.index.saver.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.saver.startingDeadlineSeconds | int | `43200` | startingDeadlineSeconds setting for K8s completed jobs | -| manager.index.saver.suspend | bool | `false` | CronJob suspend setting for index creation | -| manager.index.saver.target_addrs | list | `[]` | index saving target addresses | -| manager.index.saver.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | -| manager.index.saver.version | string | `"v0.0.0"` | version of index manager config | -| manager.index.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | -| manager.index.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | -| manager.index.service.annotations | object | `{}` | service annotations | -| manager.index.service.labels | object | `{}` | service labels | -| manager.index.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | -| manager.index.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | -| manager.index.time_zone | string | `""` | Time zone | -| manager.index.tolerations | list | `[]` | tolerations | -| manager.index.topologySpreadConstraints | list | `[]` | topology spread constraints of index manager pods | -| manager.index.version | string | `"v0.0.0"` | version of index manager config | -| manager.index.volumeMounts | list | `[]` | volume mounts | -| manager.index.volumes | list | `[]` | volumes | +| Key | Type | Default | Description | +| -------------------------------------------------------------------------------------------------------------- | ------ | 
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | +| agent.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| agent.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| agent.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| agent.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| agent.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-agent"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| agent.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| 
agent.algorithm | string | `"ngt"` | agent algorithm type. it should be `ngt` or `faiss`. | +| agent.annotations | object | `{}` | deployment annotations | +| agent.clusterRole.enabled | bool | `true` | creates clusterRole resource | +| agent.clusterRole.name | string | `"agent"` | name of clusterRole | +| agent.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | +| agent.clusterRoleBinding.name | string | `"agent"` | name of clusterRoleBinding | +| agent.enabled | bool | `true` | agent enabled | +| agent.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| agent.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| agent.faiss.auto_index_check_duration | string | `"30m"` | check duration of automatic indexing | +| agent.faiss.auto_index_duration_limit | string | `"24h"` | limit duration of automatic indexing | +| agent.faiss.auto_index_length | int | `100` | number of cache to trigger automatic indexing | +| agent.faiss.auto_save_index_duration | string | `"35m"` | duration of automatic save index | +| agent.faiss.dimension | int | `4096` | vector dimension | +| agent.faiss.enable_copy_on_write | bool | `false` | enable copy on write saving for more stable backup | +| agent.faiss.enable_in_memory_mode | bool | `true` | in-memory mode enabled | +| agent.faiss.enable_proactive_gc | bool | `false` | enable proactive GC call for reducing heap memory allocation | +| agent.faiss.index_path | string | `""` | path to index data | +| agent.faiss.initial_delay_max_duration | string | `"3m"` | maximum duration for initial delay | +| agent.faiss.kvsdb.concurrency | int | `6` | kvsdb processing concurrency | +| agent.faiss.load_index_timeout_factor | string | `"1ms"` | a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) \* (factor). 
| +| agent.faiss.m | int | `8` | m | +| agent.faiss.max_load_index_timeout | string | `"10m"` | maximum duration of load index timeout | +| agent.faiss.method_type | string | `"ivfpq"` | method type it should be `ivfpq` or `binaryindex` | +| agent.faiss.metric_type | string | `"l2"` | metric type it should be `innerproduct` or `l2` | +| agent.faiss.min_load_index_timeout | string | `"3m"` | minimum duration of load index timeout | +| agent.faiss.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of myself | +| agent.faiss.nbits_per_idx | int | `8` | nbits_per_idx | +| agent.faiss.nlist | int | `100` | nlist | +| agent.faiss.pod_name | string | `"_MY_POD_NAME_"` | pod name of myself | +| agent.faiss.vqueue.delete_buffer_pool_size | int | `5000` | delete slice pool buffer size | +| agent.faiss.vqueue.insert_buffer_pool_size | int | `10000` | insert slice pool buffer size | +| agent.hpa.enabled | bool | `false` | HPA enabled | +| agent.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| agent.image.pullPolicy | string | `"Always"` | image pull policy | +| agent.image.repository | string | `"vdaas/vald-agent-ngt"` | image repository | +| agent.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| agent.initContainers | list | `[]` | init containers | +| agent.kind | string | `"StatefulSet"` | deployment kind: Deployment, DaemonSet or StatefulSet | +| agent.logging | object | `{}` | logging config (overrides defaults.logging) | +| agent.maxReplicas | int | `300` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| agent.maxUnavailable | string | `"1"` | maximum number of unavailable replicas | +| agent.minReplicas | int | `20` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | +| agent.name | string | `"vald-agent"` | name of agent deployment | +| agent.ngt.auto_create_index_pool_size | int | `16` | batch process pool size of automatic create index operation | +| agent.ngt.auto_index_check_duration | string | `"30m"` | check duration of automatic indexing | +| agent.ngt.auto_index_duration_limit | string | `"24h"` | limit duration of automatic indexing | +| agent.ngt.auto_index_length | int | `100` | number of cache to trigger automatic indexing | +| agent.ngt.auto_save_index_duration | string | `"35m"` | duration of automatic save index | +| agent.ngt.broken_index_history_limit | int | `0` | maximum number of broken index generations to backup | +| agent.ngt.bulk_insert_chunk_size | int | `10` | bulk insert chunk size | +| agent.ngt.creation_edge_size | int | `50` | creation edge size | +| agent.ngt.default_epsilon | float | `0.05` | default epsilon used for search | +| agent.ngt.default_pool_size | int | `16` | default create index batch pool size | +| agent.ngt.default_radius | float | `-1` | default radius used for search | +| agent.ngt.dimension | int | `4096` | vector dimension | +| agent.ngt.distance_type | string | `"l2"` | distance type. it should be `l1`, `l2`, `angle`, `hamming`, `cosine`,`poincare`, `lorentz`, `jaccard`, `sparsejaccard`, `normalizedangle` or `normalizedcosine` or `innerproduct`. 
for further details about NGT libraries supported distance is https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference and vald agent's supported NGT distance type is https://pkg.go.dev/github.com/vdaas/vald/internal/core/algorithm/ngt#pkg-constants | +| agent.ngt.enable_copy_on_write | bool | `false` | enable copy on write saving for more stable backup | +| agent.ngt.enable_export_index_info_to_k8s | bool | `false` | enable export index info to k8s | +| agent.ngt.enable_in_memory_mode | bool | `true` | in-memory mode enabled | +| agent.ngt.enable_proactive_gc | bool | `false` | enable proactive GC call for reducing heap memory allocation | +| agent.ngt.enable_statistics | bool | `false` | enable index statistics loading | +| agent.ngt.error_buffer_limit | int | `10` | maximum number of core ngt error buffer pool size limit | +| agent.ngt.export_index_info_duration | string | `"1m"` | duration of exporting index info | +| agent.ngt.index_path | string | `""` | path to index data | +| agent.ngt.initial_delay_max_duration | string | `"3m"` | maximum duration for initial delay | +| agent.ngt.kvsdb.concurrency | int | `6` | kvsdb processing concurrency | +| agent.ngt.load_index_timeout_factor | string | `"1ms"` | a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) \* (factor). | +| agent.ngt.max_load_index_timeout | string | `"10m"` | maximum duration of load index timeout | +| agent.ngt.min_load_index_timeout | string | `"3m"` | minimum duration of load index timeout | +| agent.ngt.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of myself | +| agent.ngt.object_type | string | `"float"` | object type. it should be `float` or `uint8` or `float16`. for further details: https://github.com/yahoojapan/NGT/wiki/Command-Quick-Reference | +| agent.ngt.pod_name | string | `"_MY_POD_NAME_"` | pod name of myself | +| agent.ngt.search_edge_size | int | `50` | search edge size | +| agent.ngt.vqueue.delete_buffer_pool_size | int | `5000` | delete slice pool buffer size | +| agent.ngt.vqueue.insert_buffer_pool_size | int | `10000` | insert slice pool buffer size | +| agent.nodeName | string | `""` | node name | +| agent.nodeSelector | object | `{}` | node selector | +| agent.observability | object | `{"otlp":{"attribute":{"service_name":"vald-agent"}}}` | observability config (overrides defaults.observability) | +| agent.persistentVolume.accessMode | string | `"ReadWriteOncePod"` | agent pod storage accessMode | +| agent.persistentVolume.enabled | bool | `false` | enables PVC. 
It is required to enable if agent pod's file store functionality is enabled with non in-memory mode | +| agent.persistentVolume.mountPropagation | string | `"None"` | agent pod storage mountPropagation | +| agent.persistentVolume.size | string | `"100Gi"` | size of agent pod volume | +| agent.persistentVolume.storageClass | string | `"vald-sc"` | storageClass name for agent pod volume | +| agent.podAnnotations | object | `{}` | pod annotations | +| agent.podManagementPolicy | string | `"OrderedReady"` | pod management policy: OrderedReady or Parallel | +| agent.podPriority.enabled | bool | `true` | agent pod PriorityClass enabled | +| agent.podPriority.value | int | `1000000000` | agent pod PriorityClass value | +| agent.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| agent.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| agent.readreplica | object | `{"component_name":"agent-readreplica","enabled":false,"hpa":{"enabled":false,"targetCPUUtilizationPercentage":80},"label_key":"vald-readreplica-id","maxReplicas":3,"minReplicas":1,"name":"vald-agent-ngt-readreplica","service":{"annotations":{}},"snapshot_classname":"","volume_name":"vald-agent-ngt-readreplica-pvc"}` | readreplica settings for agent | +| agent.readreplica.component_name | string | `"agent-readreplica"` | app.kubernetes.io/component name of agent readreplica | +| agent.readreplica.enabled | bool | `false` | [This feature is WORK IN PROGRESS] enable agent readreplica | +| agent.readreplica.hpa.enabled | bool | `false` | HPA enabled | +| agent.readreplica.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| agent.readreplica.label_key | string | `"vald-readreplica-id"` | label key to identify read replica resources | +| agent.readreplica.maxReplicas | int | `3` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| agent.readreplica.minReplicas | int | `1` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | +| agent.readreplica.name | string | `"vald-agent-ngt-readreplica"` | name of agent readreplica | +| agent.readreplica.service | object | `{"annotations":{}}` | service settings for read replica service resources | +| agent.readreplica.service.annotations | object | `{}` | readreplica service annotations | +| agent.readreplica.snapshot_classname | string | `""` | snapshot class name for snapshotter used for read replica | +| agent.readreplica.volume_name | string | `"vald-agent-ngt-readreplica-pvc"` | name of clone volume of agent pvc for read replica | +| agent.resources | object | `{"requests":{"cpu":"300m","memory":"4Gi"}}` | compute resources. 
recommended setting of memory requests = cluster memory \* 0.4 / number of agent pods | +| agent.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| agent.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| agent.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| agent.rollingUpdate.partition | int | `0` | StatefulSet partition | +| agent.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":false,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| agent.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{"startupProbe":{"failureThreshold":200,"periodSeconds":5}}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| agent.service.annotations | object | `{}` | service annotations | +| agent.service.labels | object | `{}` | service labels | +| agent.serviceAccount.enabled | bool | `true` | creates service account | +| agent.serviceAccount.name | string | `"agent-ngt"` | name of service account | +| agent.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| agent.sidecar.config.auto_backup_duration | string | `"24h"` | auto backup duration | +| agent.sidecar.config.auto_backup_enabled | bool | `true` | auto backup triggered by timer is enabled | +| agent.sidecar.config.blob_storage.bucket | string | `""` | bucket name | +| agent.sidecar.config.blob_storage.cloud_storage.client.credentials_file_path | string | `""` | credentials file path | +| agent.sidecar.config.blob_storage.cloud_storage.client.credentials_json | string | `""` | credentials json | +| agent.sidecar.config.blob_storage.cloud_storage.url | string | `""` | cloud storage url | +| agent.sidecar.config.blob_storage.cloud_storage.write_buffer_size | int | `0` | bytes of the chunks for upload | +| agent.sidecar.config.blob_storage.cloud_storage.write_cache_control | string | `""` | Cache-Control of HTTP Header | +| agent.sidecar.config.blob_storage.cloud_storage.write_content_disposition | string | `""` | Content-Disposition of HTTP Header | +| agent.sidecar.config.blob_storage.cloud_storage.write_content_encoding | string | `""` | the encoding of the blob's content | +| agent.sidecar.config.blob_storage.cloud_storage.write_content_language | string | `""` | the language of blob's content | +| agent.sidecar.config.blob_storage.cloud_storage.write_content_type | string | `""` | MIME type of the blob | +| agent.sidecar.config.blob_storage.s3.access_key | string | `"_AWS_ACCESS_KEY_"` | s3 access key | +| agent.sidecar.config.blob_storage.s3.enable_100_continue | bool | `true` | enable AWS SDK adding the 'Expect: 100-Continue' header to PUT requests over 2MB of content. | +| agent.sidecar.config.blob_storage.s3.enable_content_md5_validation | bool | `true` | enable the S3 client to add MD5 checksum to upload API calls. 
| agent.sidecar.config.blob_storage.s3.enable_endpoint_discovery | bool | `false` | enable endpoint discovery | +| agent.sidecar.config.blob_storage.s3.enable_endpoint_host_prefix | bool | `true` | enable prefixing request endpoint hosts with modeled information | +| agent.sidecar.config.blob_storage.s3.enable_param_validation | bool | `true` | enables semantic parameter validation | +| agent.sidecar.config.blob_storage.s3.enable_ssl | bool | `true` | enable ssl for s3 session | +| agent.sidecar.config.blob_storage.s3.endpoint | string | `""` | s3 endpoint | +| agent.sidecar.config.blob_storage.s3.force_path_style | bool | `false` | use path-style addressing | +| agent.sidecar.config.blob_storage.s3.max_chunk_size | string | `"64mb"` | s3 download max chunk size | +| agent.sidecar.config.blob_storage.s3.max_part_size | string | `"64mb"` | s3 multipart upload max part size | +| agent.sidecar.config.blob_storage.s3.max_retries | int | `3` | maximum number of retries of s3 client | +| agent.sidecar.config.blob_storage.s3.region | string | `""` | s3 region | +| agent.sidecar.config.blob_storage.s3.secret_access_key | string | `"_AWS_SECRET_ACCESS_KEY_"` | s3 secret access key | +| agent.sidecar.config.blob_storage.s3.token | string | `""` | s3 token | +| agent.sidecar.config.blob_storage.s3.use_accelerate | bool | `false` | enable s3 accelerate feature | +| agent.sidecar.config.blob_storage.s3.use_arn_region | bool | `false` | s3 service client to use the region specified in the ARN | +| agent.sidecar.config.blob_storage.s3.use_dual_stack | bool | `false` | use dual stack | +| agent.sidecar.config.blob_storage.storage_type | string | `"s3"` | storage type | +| agent.sidecar.config.client.net.dialer.dual_stack_enabled | bool | `false` | HTTP client TCP dialer dual stack enabled | +| agent.sidecar.config.client.net.dialer.keepalive | string | `"5m"` | HTTP client TCP dialer keep alive | +| agent.sidecar.config.client.net.dialer.timeout | string | `"5s"` | HTTP client TCP dialer connect timeout | +| agent.sidecar.config.client.net.dns.cache_enabled | bool | `true` | HTTP client DNS cache enabled | +| agent.sidecar.config.client.net.dns.cache_expiration | string | `"24h"` | HTTP client DNS cache expiration | +| agent.sidecar.config.client.net.dns.refresh_duration | string | `"1h"` | HTTP client DNS cache refresh duration | +| agent.sidecar.config.client.net.network | string | `"tcp"` | gRPC client dialer network type | +| agent.sidecar.config.client.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| agent.sidecar.config.client.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| agent.sidecar.config.client.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| agent.sidecar.config.client.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| agent.sidecar.config.client.net.socket_option.tcp_cork | bool | `true` | server listen socket option for tcp_cork functionality | +| agent.sidecar.config.client.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| agent.sidecar.config.client.net.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | +| agent.sidecar.config.client.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for 
tcp_no_delay functionality | +| agent.sidecar.config.client.net.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| agent.sidecar.config.client.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| agent.sidecar.config.client.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| agent.sidecar.config.client.net.tls.enabled | bool | `false` | TLS enabled | +| agent.sidecar.config.client.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| agent.sidecar.config.client.net.tls.key | string | `"/path/to/key"` | TLS key path | +| agent.sidecar.config.client.transport.backoff.backoff_factor | float | `1.1` | backoff backoff factor | +| agent.sidecar.config.client.transport.backoff.backoff_time_limit | string | `"5s"` | backoff time limit | +| agent.sidecar.config.client.transport.backoff.enable_error_log | bool | `true` | backoff error log enabled | +| agent.sidecar.config.client.transport.backoff.initial_duration | string | `"5ms"` | backoff initial duration | +| agent.sidecar.config.client.transport.backoff.jitter_limit | string | `"100ms"` | backoff jitter limit | +| agent.sidecar.config.client.transport.backoff.maximum_duration | string | `"5s"` | backoff maximum duration | +| agent.sidecar.config.client.transport.backoff.retry_count | int | `100` | backoff retry count | +| agent.sidecar.config.client.transport.round_tripper.expect_continue_timeout | string | `"5s"` | expect continue timeout | +| agent.sidecar.config.client.transport.round_tripper.force_attempt_http_2 | bool | `true` | force attempt HTTP2 | +| agent.sidecar.config.client.transport.round_tripper.idle_conn_timeout | string | `"90s"` | timeout for idle connections | +| agent.sidecar.config.client.transport.round_tripper.max_conns_per_host | int | `10` | maximum count of connections per host | +| agent.sidecar.config.client.transport.round_tripper.max_idle_conns | int | `100` | maximum count of idle connections | +| agent.sidecar.config.client.transport.round_tripper.max_idle_conns_per_host | int | `10` | maximum count of idle connections per host | +| agent.sidecar.config.client.transport.round_tripper.max_response_header_size | int | `0` | maximum response header size | +| agent.sidecar.config.client.transport.round_tripper.read_buffer_size | int | `0` | read buffer size | +| agent.sidecar.config.client.transport.round_tripper.response_header_timeout | string | `"5s"` | timeout for response header | +| agent.sidecar.config.client.transport.round_tripper.tls_handshake_timeout | string | `"5s"` | TLS handshake timeout | +| agent.sidecar.config.client.transport.round_tripper.write_buffer_size | int | `0` | write buffer size | +| agent.sidecar.config.compress.compress_algorithm | string | `"gzip"` | compression algorithm. must be `gob`, `gzip`, `lz4` or `zstd` | +| agent.sidecar.config.compress.compression_level | int | `-1` | compression level. value range relies on which algorithm is used. `gob`: level will be ignored. `gzip`: -1 (default compression), 0 (no compression), or 1 (best speed) to 9 (best compression). `lz4`: >= 0, higher is better compression. `zstd`: 1 (fastest) to 22 (best), however implementation relies on klauspost/compress. 
| +| agent.sidecar.config.filename | string | `"_MY_POD_NAME_"` | backup filename | +| agent.sidecar.config.filename_suffix | string | `".tar.gz"` | suffix for backup filename | +| agent.sidecar.config.post_stop_timeout | string | `"2m"` | timeout for observing file changes during post stop | +| agent.sidecar.config.restore_backoff.backoff_factor | float | `1.2` | restore backoff factor | +| agent.sidecar.config.restore_backoff.backoff_time_limit | string | `"30m"` | restore backoff time limit | +| agent.sidecar.config.restore_backoff.enable_error_log | bool | `true` | restore backoff log enabled | +| agent.sidecar.config.restore_backoff.initial_duration | string | `"1s"` | restore backoff initial duration | +| agent.sidecar.config.restore_backoff.jitter_limit | string | `"10s"` | restore backoff jitter limit | +| agent.sidecar.config.restore_backoff.maximum_duration | string | `"1m"` | restore backoff maximum duration | +| agent.sidecar.config.restore_backoff.retry_count | int | `100` | restore backoff retry count | +| agent.sidecar.config.restore_backoff_enabled | bool | `false` | restore backoff enabled | +| agent.sidecar.config.watch_enabled | bool | `true` | auto backup triggered by file changes is enabled | +| agent.sidecar.enabled | bool | `false` | sidecar enabled | +| agent.sidecar.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}},{"name":"AWS_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"access-key","name":"aws-secret"}}},{"name":"AWS_SECRET_ACCESS_KEY","valueFrom":{"secretKeyRef":{"key":"secret-access-key","name":"aws-secret"}}}]` | environment variables | +| agent.sidecar.image.pullPolicy | string | `"Always"` | image pull policy | +| agent.sidecar.image.repository | string | `"vdaas/vald-agent-sidecar"` | image repository | +| agent.sidecar.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| agent.sidecar.initContainerEnabled | bool | `false` | sidecar on initContainer mode enabled. | +| agent.sidecar.logging | object | `{}` | logging config (overrides defaults.logging) | +| agent.sidecar.name | string | `"vald-agent-sidecar"` | name of agent sidecar | +| agent.sidecar.observability | object | `{"otlp":{"attribute":{"service_name":"vald-agent-sidecar"}}}` | observability config (overrides defaults.observability) | +| agent.sidecar.resources | object | `{"requests":{"cpu":"100m","memory":"100Mi"}}` | compute resources. 
| +| agent.sidecar.server_config | object | `{"healths":{"liveness":{"enabled":false,"port":13000,"servicePort":13000},"readiness":{"enabled":false,"port":13001,"servicePort":13001},"startup":{"enabled":false,"port":13001}},"metrics":{"pprof":{"port":16060,"servicePort":16060}},"servers":{"grpc":{"enabled":false,"port":18081,"servicePort":18081},"rest":{"enabled":false,"port":18080,"servicePort":18080}}}` | server config (overrides defaults.server_config) | +| agent.sidecar.service.annotations | object | `{}` | agent sidecar service annotations | +| agent.sidecar.service.enabled | bool | `false` | agent sidecar service enabled | +| agent.sidecar.service.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| agent.sidecar.service.labels | object | `{}` | agent sidecar service labels | +| agent.sidecar.service.type | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| agent.sidecar.time_zone | string | `""` | Time zone | +| agent.sidecar.version | string | `"v0.0.0"` | version of agent sidecar config | +| agent.terminationGracePeriodSeconds | int | `120` | duration in seconds pod needs to terminate gracefully | +| agent.time_zone | string | `""` | Time zone | +| agent.tolerations | list | `[]` | tolerations | +| agent.topologySpreadConstraints | list | `[]` | topology spread constraints for agent pods | +| agent.unhealthyPodEvictionPolicy | string | `"IfHealthyBudget"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. | +| agent.version | string | `"v0.0.0"` | version of agent config | +| agent.volumeMounts | list | `[]` | volume mounts | +| agent.volumes | list | `[]` | volumes | +| defaults.grpc.client.addrs | list | `[]` | gRPC client addresses | +| defaults.grpc.client.backoff.backoff_factor | float | `1.1` | gRPC client backoff factor | +| defaults.grpc.client.backoff.backoff_time_limit | string | `"5s"` | gRPC client backoff time limit | +| defaults.grpc.client.backoff.enable_error_log | bool | `true` | gRPC client backoff log enabled | +| defaults.grpc.client.backoff.initial_duration | string | `"5ms"` | gRPC client backoff initial duration | +| defaults.grpc.client.backoff.jitter_limit | string | `"100ms"` | gRPC client backoff jitter limit | +| defaults.grpc.client.backoff.maximum_duration | string | `"5s"` | gRPC client backoff maximum duration | +| defaults.grpc.client.backoff.retry_count | int | `100` | gRPC client backoff retry count | +| defaults.grpc.client.call_option.content_subtype | string | `""` | gRPC client call option content subtype | +| defaults.grpc.client.call_option.max_recv_msg_size | int | `0` | gRPC client call option max receive message size | +| defaults.grpc.client.call_option.max_retry_rpc_buffer_size | int | `0` | gRPC client call option max retry rpc buffer size | +| defaults.grpc.client.call_option.max_send_msg_size | int | `0` | gRPC client call option max send message size | +| defaults.grpc.client.call_option.wait_for_ready | bool | `true` | gRPC client call option wait for ready | +| defaults.grpc.client.circuit_breaker.closed_error_rate | float | `0.7` | gRPC client circuitbreaker closed error rate | +| defaults.grpc.client.circuit_breaker.closed_refresh_timeout | string | `"10s"` | gRPC client circuitbreaker closed refresh timeout | +| defaults.grpc.client.circuit_breaker.half_open_error_rate | float | `0.5` | gRPC client 
circuitbreaker half-open error rate | +| defaults.grpc.client.circuit_breaker.min_samples | int | `1000` | gRPC client circuitbreaker minimum sampling count | +| defaults.grpc.client.circuit_breaker.open_timeout | string | `"1s"` | gRPC client circuitbreaker open timeout | +| defaults.grpc.client.connection_pool.enable_dns_resolver | bool | `true` | enables gRPC client connection pool dns resolver, when enabled vald uses ip handshake exclude dns discovery which improves network performance | +| defaults.grpc.client.connection_pool.enable_rebalance | bool | `true` | enables gRPC client connection pool rebalance | +| defaults.grpc.client.connection_pool.old_conn_close_duration | string | `"2m"` | makes delay before gRPC client connection closing during connection pool rebalance | +| defaults.grpc.client.connection_pool.rebalance_duration | string | `"30m"` | gRPC client connection pool rebalance duration | +| defaults.grpc.client.connection_pool.size | int | `3` | gRPC client connection pool size | +| defaults.grpc.client.dial_option.authority | string | `""` | gRPC client dial option authority | +| defaults.grpc.client.dial_option.backoff_base_delay | string | `"1s"` | gRPC client dial option base backoff delay | +| defaults.grpc.client.dial_option.backoff_jitter | float | `0.2` | gRPC client dial option backoff jitter | +| defaults.grpc.client.dial_option.backoff_max_delay | string | `"120s"` | gRPC client dial option max backoff delay | +| defaults.grpc.client.dial_option.backoff_multiplier | float | `1.6` | gRPC client dial option backoff multiplier | +| defaults.grpc.client.dial_option.disable_retry | bool | `false` | gRPC client dial option disables retry | +| defaults.grpc.client.dial_option.enable_backoff | bool | `false` | gRPC client dial option backoff enabled | +| defaults.grpc.client.dial_option.idle_timeout | string | `"1h"` | gRPC client dial option idle_timeout | +| defaults.grpc.client.dial_option.initial_connection_window_size | int | `2097152` | gRPC client dial option initial connection window size | +| defaults.grpc.client.dial_option.initial_window_size | int | `1048576` | gRPC client dial option initial window size | +| defaults.grpc.client.dial_option.insecure | bool | `true` | gRPC client dial option insecure enabled | +| defaults.grpc.client.dial_option.interceptors | list | `[]` | gRPC client interceptors | +| defaults.grpc.client.dial_option.keepalive.permit_without_stream | bool | `false` | gRPC client keep alive permit without stream | +| defaults.grpc.client.dial_option.keepalive.time | string | `""` | gRPC client keep alive time | +| defaults.grpc.client.dial_option.keepalive.timeout | string | `"30s"` | gRPC client keep alive timeout | +| defaults.grpc.client.dial_option.max_call_attempts | int | `0` | gRPC client dial option number of max call attempts | +| defaults.grpc.client.dial_option.max_header_list_size | int | `0` | gRPC client dial option max header list size | +| defaults.grpc.client.dial_option.max_msg_size | int | `0` | gRPC client dial option max message size | +| defaults.grpc.client.dial_option.min_connection_timeout | string | `"20s"` | gRPC client dial option minimum connection timeout | +| defaults.grpc.client.dial_option.net.dialer.dual_stack_enabled | bool | `true` | gRPC client TCP dialer dual stack enabled | +| defaults.grpc.client.dial_option.net.dialer.keepalive | string | `""` | gRPC client TCP dialer keep alive | +| defaults.grpc.client.dial_option.net.dialer.timeout | string | `""` | gRPC client TCP dialer timeout | +| 
defaults.grpc.client.dial_option.net.dns.cache_enabled | bool | `true` | gRPC client DNS cache enabled | +| defaults.grpc.client.dial_option.net.dns.cache_expiration | string | `"1h"` | gRPC client DNS cache expiration | +| defaults.grpc.client.dial_option.net.dns.refresh_duration | string | `"30m"` | gRPC client DNS cache refresh duration | +| defaults.grpc.client.dial_option.net.network | string | `"tcp"` | gRPC client dialer network type | +| defaults.grpc.client.dial_option.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.grpc.client.dial_option.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.grpc.client.dial_option.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.grpc.client.dial_option.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.grpc.client.dial_option.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| defaults.grpc.client.dial_option.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.grpc.client.dial_option.net.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | +| defaults.grpc.client.dial_option.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | +| defaults.grpc.client.dial_option.net.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| defaults.grpc.client.dial_option.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| defaults.grpc.client.dial_option.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| defaults.grpc.client.dial_option.net.tls.enabled | bool | `false` | TLS enabled | +| defaults.grpc.client.dial_option.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| defaults.grpc.client.dial_option.net.tls.key | string | `"/path/to/key"` | TLS key path | +| defaults.grpc.client.dial_option.read_buffer_size | int | `0` | gRPC client dial option read buffer size | +| defaults.grpc.client.dial_option.shared_write_buffer | bool | `false` | gRPC client dial option sharing write buffer | +| defaults.grpc.client.dial_option.timeout | string | `""` | gRPC client dial option timeout | +| defaults.grpc.client.dial_option.user_agent | string | `"Vald-gRPC"` | gRPC client dial option user_agent | +| defaults.grpc.client.dial_option.write_buffer_size | int | `0` | gRPC client dial option write buffer size | +| defaults.grpc.client.health_check_duration | string | `"1s"` | gRPC client health check duration | +| defaults.grpc.client.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| defaults.grpc.client.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| defaults.grpc.client.tls.enabled | bool | `false` | TLS enabled | +| defaults.grpc.client.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| defaults.grpc.client.tls.key | string | `"/path/to/key"` | TLS key path | +| defaults.image.tag | string | `"v1.7.16"` | docker image tag | +| defaults.logging.format | string | `"raw"` | logging format. 
logging format must be `raw` or `json` | +| defaults.logging.level | string | `"debug"` | logging level. logging level must be `debug`, `info`, `warn`, `error` or `fatal`. | +| defaults.logging.logger | string | `"glg"` | logger name. currently logger must be `glg` or `zap`. | +| defaults.networkPolicy.custom | object | `{"egress":[],"ingress":[]}` | custom network policies that a user can add | +| defaults.networkPolicy.custom.egress | list | `[]` | custom egress network policies that a user can add | +| defaults.networkPolicy.custom.ingress | list | `[]` | custom ingress network policies that a user can add | +| defaults.networkPolicy.enabled | bool | `false` | if network policy enabled | +| defaults.observability.enabled | bool | `false` | observability features enabled | +| defaults.observability.metrics.enable_cgo | bool | `true` | CGO metrics enabled | +| defaults.observability.metrics.enable_goroutine | bool | `true` | goroutine metrics enabled | +| defaults.observability.metrics.enable_memory | bool | `true` | memory metrics enabled | +| defaults.observability.metrics.enable_version_info | bool | `true` | version info metrics enabled | +| defaults.observability.metrics.version_info_labels | list | `["vald_version","server_name","git_commit","build_time","go_version","go_os","go_arch","algorithm_info"]` | enabled label names of version info | +| defaults.observability.otlp.attribute | object | `{"namespace":"_MY_POD_NAMESPACE_","node_name":"_MY_NODE_NAME_","pod_name":"_MY_POD_NAME_","service_name":"vald"}` | default resource attribute | +| defaults.observability.otlp.attribute.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace | +| defaults.observability.otlp.attribute.node_name | string | `"_MY_NODE_NAME_"` | node name | +| defaults.observability.otlp.attribute.pod_name | string | `"_MY_POD_NAME_"` | pod name | +| defaults.observability.otlp.attribute.service_name | string | `"vald"` | service name | +| defaults.observability.otlp.collector_endpoint | string | `""` | OpenTelemetry Collector endpoint | +| defaults.observability.otlp.metrics_export_interval | string | `"1s"` | metrics export interval | +| defaults.observability.otlp.metrics_export_timeout | string | `"1m"` | metrics export timeout | +| defaults.observability.otlp.trace_batch_timeout | string | `"1s"` | trace batch timeout | +| defaults.observability.otlp.trace_export_timeout | string | `"1m"` | trace export timeout | +| defaults.observability.otlp.trace_max_export_batch_size | int | `1024` | trace maximum export batch size | +| defaults.observability.otlp.trace_max_queue_size | int | `256` | trace maximum queue size | +| defaults.observability.trace.enabled | bool | `false` | trace enabled | +| defaults.server_config.full_shutdown_duration | string | `"600s"` | server full shutdown duration | +| defaults.server_config.healths.liveness.enabled | bool | `true` | liveness server enabled | +| defaults.server_config.healths.liveness.host | string | `"0.0.0.0"` | liveness server host | +| defaults.server_config.healths.liveness.livenessProbe.failureThreshold | int | `2` | liveness probe failure threshold | +| defaults.server_config.healths.liveness.livenessProbe.httpGet.path | string | `"/liveness"` | liveness probe path | +| defaults.server_config.healths.liveness.livenessProbe.httpGet.port | string | `"liveness"` | liveness probe port | +| defaults.server_config.healths.liveness.livenessProbe.httpGet.scheme | string | `"HTTP"` | liveness probe scheme | +| 
defaults.server_config.healths.liveness.livenessProbe.initialDelaySeconds | int | `5` | liveness probe initial delay seconds | +| defaults.server_config.healths.liveness.livenessProbe.periodSeconds | int | `3` | liveness probe period seconds | +| defaults.server_config.healths.liveness.livenessProbe.successThreshold | int | `1` | liveness probe success threshold | +| defaults.server_config.healths.liveness.livenessProbe.timeoutSeconds | int | `2` | liveness probe timeout seconds | +| defaults.server_config.healths.liveness.port | int | `3000` | liveness server port | +| defaults.server_config.healths.liveness.server.http.handler_timeout | string | `""` | liveness server handler timeout | +| defaults.server_config.healths.liveness.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | +| defaults.server_config.healths.liveness.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. | +| defaults.server_config.healths.liveness.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | +| defaults.server_config.healths.liveness.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | +| defaults.server_config.healths.liveness.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | +| defaults.server_config.healths.liveness.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | +| defaults.server_config.healths.liveness.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | +| defaults.server_config.healths.liveness.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | +| defaults.server_config.healths.liveness.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | +| defaults.server_config.healths.liveness.server.http.idle_timeout | string | `""` | liveness server idle timeout | +| defaults.server_config.healths.liveness.server.http.read_header_timeout | string | `""` | liveness server read header timeout | +| defaults.server_config.healths.liveness.server.http.read_timeout | string | `""` | liveness server read timeout | +| defaults.server_config.healths.liveness.server.http.shutdown_duration | string | `"5s"` | liveness server shutdown duration | +| defaults.server_config.healths.liveness.server.http.write_timeout | string | `""` | liveness server write timeout | +| defaults.server_config.healths.liveness.server.mode | string | `"REST"` | liveness server mode | +| defaults.server_config.healths.liveness.server.network | string | `"tcp"` | network mode | +| defaults.server_config.healths.liveness.server.probe_wait_time | string | `"3s"` | liveness server probe wait time | +| defaults.server_config.healths.liveness.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| +| defaults.server_config.healths.liveness.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.server_config.healths.liveness.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.server_config.healths.liveness.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.server_config.healths.liveness.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.server_config.healths.liveness.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| defaults.server_config.healths.liveness.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.server_config.healths.liveness.server.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | +| defaults.server_config.healths.liveness.server.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | +| defaults.server_config.healths.liveness.server.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | +| defaults.server_config.healths.liveness.server.socket_path | string | `""` | server socket_path | +| defaults.server_config.healths.liveness.servicePort | int | `3000` | liveness server service port | +| defaults.server_config.healths.readiness.enabled | bool | `true` | readiness server enabled | +| defaults.server_config.healths.readiness.host | string | `"0.0.0.0"` | readiness server host | +| defaults.server_config.healths.readiness.port | int | `3001` | readiness server port | +| defaults.server_config.healths.readiness.readinessProbe.failureThreshold | int | `2` | readiness probe failure threshold | +| defaults.server_config.healths.readiness.readinessProbe.httpGet.path | string | `"/readiness"` | readiness probe path | +| defaults.server_config.healths.readiness.readinessProbe.httpGet.port | string | `"readiness"` | readiness probe port | +| defaults.server_config.healths.readiness.readinessProbe.httpGet.scheme | string | `"HTTP"` | readiness probe scheme | +| defaults.server_config.healths.readiness.readinessProbe.initialDelaySeconds | int | `10` | readiness probe initial delay seconds | +| defaults.server_config.healths.readiness.readinessProbe.periodSeconds | int | `3` | readiness probe period seconds | +| defaults.server_config.healths.readiness.readinessProbe.successThreshold | int | `1` | readiness probe success threshold | +| defaults.server_config.healths.readiness.readinessProbe.timeoutSeconds | int | `2` | readiness probe timeout seconds | +| defaults.server_config.healths.readiness.server.http.handler_timeout | string | `""` | readiness server handler timeout | +| defaults.server_config.healths.readiness.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | +| defaults.server_config.healths.readiness.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. | +| defaults.server_config.healths.readiness.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. 
| +| defaults.server_config.healths.readiness.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | +| defaults.server_config.healths.readiness.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | +| defaults.server_config.healths.readiness.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | +| defaults.server_config.healths.readiness.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | +| defaults.server_config.healths.readiness.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | +| defaults.server_config.healths.readiness.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | +| defaults.server_config.healths.readiness.server.http.idle_timeout | string | `""` | readiness server idle timeout | +| defaults.server_config.healths.readiness.server.http.read_header_timeout | string | `""` | readiness server read header timeout | +| defaults.server_config.healths.readiness.server.http.read_timeout | string | `""` | readiness server read timeout | +| defaults.server_config.healths.readiness.server.http.shutdown_duration | string | `"0s"` | readiness server shutdown duration | +| defaults.server_config.healths.readiness.server.http.write_timeout | string | `""` | readiness server write timeout | +| defaults.server_config.healths.readiness.server.mode | string | `"REST"` | readiness server mode | +| defaults.server_config.healths.readiness.server.network | string | `"tcp"` | network mode | +| defaults.server_config.healths.readiness.server.probe_wait_time | string | `"3s"` | readiness server probe wait time | +| defaults.server_config.healths.readiness.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| +| defaults.server_config.healths.readiness.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.server_config.healths.readiness.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.server_config.healths.readiness.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.server_config.healths.readiness.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.server_config.healths.readiness.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| defaults.server_config.healths.readiness.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | +| defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | +| defaults.server_config.healths.readiness.server.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | +| defaults.server_config.healths.readiness.server.socket_path | string | `""` | server socket_path | +| defaults.server_config.healths.readiness.servicePort | int | `3001` | readiness server service port | +| defaults.server_config.healths.startup.enabled | bool | `true` | startup server enabled | +| defaults.server_config.healths.startup.port | int | `3000` | startup server port | +| defaults.server_config.healths.startup.startupProbe.failureThreshold | int | `30` | startup probe failure threshold | +| defaults.server_config.healths.startup.startupProbe.httpGet.path | string | `"/liveness"` | startup probe path | +| defaults.server_config.healths.startup.startupProbe.httpGet.port | string | `"liveness"` | startup probe port | +| defaults.server_config.healths.startup.startupProbe.httpGet.scheme | string | `"HTTP"` | startup probe scheme | +| defaults.server_config.healths.startup.startupProbe.initialDelaySeconds | int | `5` | startup probe initial delay seconds | +| defaults.server_config.healths.startup.startupProbe.periodSeconds | int | `5` | startup probe period seconds | +| defaults.server_config.healths.startup.startupProbe.successThreshold | int | `1` | startup probe success threshold | +| defaults.server_config.healths.startup.startupProbe.timeoutSeconds | int | `2` | startup probe timeout seconds | +| defaults.server_config.metrics.pprof.enabled | bool | `false` | pprof server enabled | +| defaults.server_config.metrics.pprof.host | string | `"0.0.0.0"` | pprof server host | +| defaults.server_config.metrics.pprof.port | int | `6060` | pprof server port | +| defaults.server_config.metrics.pprof.server.http.handler_timeout | string | `"5s"` | pprof server handler timeout | +| defaults.server_config.metrics.pprof.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | +| defaults.server_config.metrics.pprof.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. 
| +| defaults.server_config.metrics.pprof.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | +| defaults.server_config.metrics.pprof.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | +| defaults.server_config.metrics.pprof.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | +| defaults.server_config.metrics.pprof.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | +| defaults.server_config.metrics.pprof.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | +| defaults.server_config.metrics.pprof.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. | +| defaults.server_config.metrics.pprof.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | +| defaults.server_config.metrics.pprof.server.http.idle_timeout | string | `"2s"` | pprof server idle timeout | +| defaults.server_config.metrics.pprof.server.http.read_header_timeout | string | `"1s"` | pprof server read header timeout | +| defaults.server_config.metrics.pprof.server.http.read_timeout | string | `"1s"` | pprof server read timeout | +| defaults.server_config.metrics.pprof.server.http.shutdown_duration | string | `"5s"` | pprof server shutdown duration | +| defaults.server_config.metrics.pprof.server.http.write_timeout | string | `"1m"` | pprof server write timeout | +| defaults.server_config.metrics.pprof.server.mode | string | `"REST"` | pprof server mode | +| defaults.server_config.metrics.pprof.server.network | string | `"tcp"` | network mode | +| defaults.server_config.metrics.pprof.server.probe_wait_time | string | `"3s"` | pprof server probe wait time | +| defaults.server_config.metrics.pprof.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy. 
| +| defaults.server_config.metrics.pprof.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.server_config.metrics.pprof.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.server_config.metrics.pprof.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.server_config.metrics.pprof.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.server_config.metrics.pprof.server.socket_option.tcp_cork | bool | `true` | server listen socket option for tcp_cork functionality | +| defaults.server_config.metrics.pprof.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.server_config.metrics.pprof.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | +| defaults.server_config.metrics.pprof.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | +| defaults.server_config.metrics.pprof.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| defaults.server_config.metrics.pprof.server.socket_path | string | `""` | server socket_path | +| defaults.server_config.metrics.pprof.servicePort | int | `6060` | pprof server service port | +| defaults.server_config.servers.grpc.enabled | bool | `true` | gRPC server enabled | +| defaults.server_config.servers.grpc.host | string | `"0.0.0.0"` | gRPC server host | +| defaults.server_config.servers.grpc.port | int | `8081` | gRPC server port | +| defaults.server_config.servers.grpc.server.grpc.bidirectional_stream_concurrency | int | `20` | gRPC server bidirectional stream concurrency | +| defaults.server_config.servers.grpc.server.grpc.connection_timeout | string | `""` | gRPC server connection timeout | +| defaults.server_config.servers.grpc.server.grpc.enable_admin | bool | `true` | gRPC server admin option | +| defaults.server_config.servers.grpc.server.grpc.enable_channelz | bool | `true` | gRPC server channelz option | +| defaults.server_config.servers.grpc.server.grpc.enable_reflection | bool | `true` | gRPC server reflection option | +| defaults.server_config.servers.grpc.server.grpc.header_table_size | int | `0` | gRPC server header table size | +| defaults.server_config.servers.grpc.server.grpc.initial_conn_window_size | int | `2097152` | gRPC server initial connection window size | +| defaults.server_config.servers.grpc.server.grpc.initial_window_size | int | `1048576` | gRPC server initial window size | +| defaults.server_config.servers.grpc.server.grpc.interceptors | list | `["RecoverInterceptor"]` | gRPC server interceptors | +| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_age | string | `""` | gRPC server keep alive max connection age | +| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_age_grace | string | `""` | gRPC server keep alive max connection age grace | +| defaults.server_config.servers.grpc.server.grpc.keepalive.max_conn_idle | string | `""` | gRPC server keep alive max connection idle | +| defaults.server_config.servers.grpc.server.grpc.keepalive.min_time | string | `"10m"` | gRPC server keep alive min_time | +| 
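
To make the gRPC server rows above easier to apply, here is a minimal sketch of a user-maintained values override; the file name `my-values.yaml` and the tuned numbers are assumptions for illustration, not recommended settings, and only keys documented in this table are used.

```yaml
# my-values.yaml (hypothetical): tune the default gRPC server settings.
defaults:
  server_config:
    servers:
      grpc:
        port: 8081                                # gRPC server port
        server:
          grpc:
            bidirectional_stream_concurrency: 20  # gRPC server bidirectional stream concurrency
            interceptors:
              - "RecoverInterceptor"              # keep the default recovery interceptor
            keepalive:
              min_time: "10m"                     # gRPC server keep alive min_time
```

Such a file would typically be applied with `helm upgrade --install vald vald/vald -f my-values.yaml`, assuming the chart is installed from the `vald` Helm repository.
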
defaults.server_config.servers.grpc.server.grpc.keepalive.permit_without_stream | bool | `false` | gRPC server keep alive permit_without_stream | +| defaults.server_config.servers.grpc.server.grpc.keepalive.time | string | `"3h"` | gRPC server keep alive time | +| defaults.server_config.servers.grpc.server.grpc.keepalive.timeout | string | `"60s"` | gRPC server keep alive timeout | +| defaults.server_config.servers.grpc.server.grpc.max_concurrent_streams | int | `0` | gRPC server max concurrent stream size | +| defaults.server_config.servers.grpc.server.grpc.max_header_list_size | int | `0` | gRPC server max header list size | +| defaults.server_config.servers.grpc.server.grpc.max_receive_message_size | int | `0` | gRPC server max receive message size | +| defaults.server_config.servers.grpc.server.grpc.max_send_message_size | int | `0` | gRPC server max send message size | +| defaults.server_config.servers.grpc.server.grpc.num_stream_workers | int | `0` | gRPC server number of stream workers | +| defaults.server_config.servers.grpc.server.grpc.read_buffer_size | int | `0` | gRPC server read buffer size | +| defaults.server_config.servers.grpc.server.grpc.shared_write_buffer | bool | `false` | gRPC server write buffer sharing option | +| defaults.server_config.servers.grpc.server.grpc.wait_for_handlers | bool | `true` | gRPC server waits for handlers to finish before stopping | +| defaults.server_config.servers.grpc.server.grpc.write_buffer_size | int | `0` | gRPC server write buffer size | +| defaults.server_config.servers.grpc.server.mode | string | `"GRPC"` | gRPC server mode | +| defaults.server_config.servers.grpc.server.network | string | `"tcp"` | network mode | +| defaults.server_config.servers.grpc.server.probe_wait_time | string | `"3s"` | gRPC server probe wait time | +| defaults.server_config.servers.grpc.server.restart | bool | `true` | This configuration enables automatic restart of the same configured server when it becomes unhealthy.
| +| defaults.server_config.servers.grpc.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.server_config.servers.grpc.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.server_config.servers.grpc.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.server_config.servers.grpc.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.server_config.servers.grpc.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| defaults.server_config.servers.grpc.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.server_config.servers.grpc.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | +| defaults.server_config.servers.grpc.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | +| defaults.server_config.servers.grpc.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| defaults.server_config.servers.grpc.server.socket_path | string | `""` | server socket_path | +| defaults.server_config.servers.grpc.servicePort | int | `8081` | gRPC server service port | +| defaults.server_config.servers.rest.enabled | bool | `false` | REST server enabled | +| defaults.server_config.servers.rest.host | string | `"0.0.0.0"` | REST server host | +| defaults.server_config.servers.rest.port | int | `8080` | REST server port | +| defaults.server_config.servers.rest.server.http.handler_timeout | string | `"5s"` | REST server handler timeout | +| defaults.server_config.servers.rest.server.http.http2.enabled | bool | `false` | HTTP2 server enabled | +| defaults.server_config.servers.rest.server.http.http2.handler_limit | int | `0` | Limits the number of http.Handler ServeHTTP goroutines which may run at a time over all connections. Negative or zero no limit. | +| defaults.server_config.servers.rest.server.http.http2.max_concurrent_streams | int | `0` | The number of concurrent streams that each client may have open at a time. | +| defaults.server_config.servers.rest.server.http.http2.max_decoder_header_table_size | int | `4096` | Informs the remote endpoint of the maximum size of the header compression table used to decode header blocks, in octets. If zero, the default value of 4096 is used. | +| defaults.server_config.servers.rest.server.http.http2.max_encoder_header_table_size | int | `4096` | An upper limit for the header compression table used for encoding request headers. | +| defaults.server_config.servers.rest.server.http.http2.max_read_frame_size | int | `0` | The largest frame this server is willing to read. | +| defaults.server_config.servers.rest.server.http.http2.max_upload_buffer_per_connection | int | `0` | The size of the initial flow control window for each connections. | +| defaults.server_config.servers.rest.server.http.http2.max_upload_buffer_per_stream | int | `0` | The size of the initial flow control window for each streams. 
| +| defaults.server_config.servers.rest.server.http.http2.permit_prohibited_cipher_suites | bool | `true` | if true, permits the use of cipher suites prohibited by the HTTP/2 spec. | +| defaults.server_config.servers.rest.server.http.idle_timeout | string | `"2s"` | REST server idle timeout | +| defaults.server_config.servers.rest.server.http.read_header_timeout | string | `"1s"` | REST server read header timeout | +| defaults.server_config.servers.rest.server.http.read_timeout | string | `"1s"` | REST server read timeout | +| defaults.server_config.servers.rest.server.http.shutdown_duration | string | `"5s"` | REST server shutdown duration | +| defaults.server_config.servers.rest.server.http.write_timeout | string | `"1s"` | REST server write timeout | +| defaults.server_config.servers.rest.server.mode | string | `"REST"` | REST server server mode | +| defaults.server_config.servers.rest.server.network | string | `"tcp"` | network mode | +| defaults.server_config.servers.rest.server.probe_wait_time | string | `"3s"` | REST server probe wait time | +| defaults.server_config.servers.rest.server.restart | bool | `true` | | +| defaults.server_config.servers.rest.server.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| defaults.server_config.servers.rest.server.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| defaults.server_config.servers.rest.server.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| defaults.server_config.servers.rest.server.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| defaults.server_config.servers.rest.server.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| defaults.server_config.servers.rest.server.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| defaults.server_config.servers.rest.server.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | +| defaults.server_config.servers.rest.server.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | +| defaults.server_config.servers.rest.server.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| defaults.server_config.servers.rest.server.socket_path | string | `""` | network socket_path | +| defaults.server_config.servers.rest.servicePort | int | `8080` | REST server service port | +| defaults.server_config.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| defaults.server_config.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| defaults.server_config.tls.enabled | bool | `false` | TLS enabled | +| defaults.server_config.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| defaults.server_config.tls.key | string | `"/path/to/key"` | TLS key path | +| defaults.time_zone | string | `"UTC"` | Time zone | +| discoverer.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| discoverer.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| 
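
Where the rows above document `defaults.server_config.tls.*`, the sketch below shows one way to switch server-side TLS on; it assumes the certificate files are already mounted into the pods at the given paths, which are placeholders.

```yaml
# Hypothetical values override: enable TLS for the servers using the keys documented above.
defaults:
  server_config:
    tls:
      enabled: true                # defaults.server_config.tls.enabled
      ca: "/path/to/ca"            # TLS ca path (placeholder)
      cert: "/path/to/cert"        # TLS cert path (placeholder)
      key: "/path/to/key"          # TLS key path (placeholder)
      insecure_skip_verify: false  # keep SSL certificate verification enabled
```
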
discoverer.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| discoverer.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| discoverer.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-discoverer"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| discoverer.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| discoverer.annotations | object | `{}` | deployment annotations | +| discoverer.clusterRole.enabled | bool | `true` | creates clusterRole resource | +| discoverer.clusterRole.name | string | `"discoverer"` | name of clusterRole | +| discoverer.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | +| discoverer.clusterRoleBinding.name | string | `"discoverer"` | name of clusterRoleBinding | +| discoverer.discoverer.discovery_duration | string | `"3s"` | duration to discovery | +| discoverer.discoverer.name | string | `""` | name to discovery | +| discoverer.discoverer.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | +| discoverer.discoverer.net.dialer.dual_stack_enabled | bool | `false` | TCP dialer dual stack enabled | +| discoverer.discoverer.net.dialer.keepalive | string | `"10m"` | TCP dialer keep alive | +| discoverer.discoverer.net.dialer.timeout | string | `"30s"` | TCP dialer timeout | +| discoverer.discoverer.net.dns.cache_enabled | bool | `true` | DNS cache enabled | +| discoverer.discoverer.net.dns.cache_expiration | string | `"24h"` | DNS cache expiration | +| discoverer.discoverer.net.dns.refresh_duration | string | `"5m"` | DNS cache refresh duration | +| discoverer.discoverer.net.network | string | `"tcp"` | gRPC client dialer network type | +| discoverer.discoverer.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| discoverer.discoverer.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| discoverer.discoverer.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| discoverer.discoverer.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| discoverer.discoverer.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| discoverer.discoverer.net.socket_option.tcp_defer_accept | bool | `false` | server listen socket option for tcp_defer_accept functionality | +| discoverer.discoverer.net.socket_option.tcp_fast_open | bool | `false` | server listen socket option for tcp_fast_open functionality | +| discoverer.discoverer.net.socket_option.tcp_no_delay | bool | `false` | server listen socket option for tcp_no_delay functionality | +| discoverer.discoverer.net.socket_option.tcp_quick_ack | bool | `false` | server listen socket option for tcp_quick_ack functionality | +| discoverer.discoverer.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| discoverer.discoverer.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| discoverer.discoverer.net.tls.enabled | bool | `false` | 
TLS enabled | +| discoverer.discoverer.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| discoverer.discoverer.net.tls.key | string | `"/path/to/key"` | TLS key path | +| discoverer.discoverer.selectors | object | `{"node":{"fields":{},"labels":{}},"node_metrics":{"fields":{},"labels":{}},"pod":{"fields":{},"labels":{}},"pod_metrics":{"fields":{},"labels":{}},"service":{"fields":{},"labels":{}}}` | k8s resource selectors | +| discoverer.discoverer.selectors.node | object | `{"fields":{},"labels":{}}` | k8s resource selectors for node discovery | +| discoverer.discoverer.selectors.node.fields | object | `{}` | k8s field selectors for node discovery | +| discoverer.discoverer.selectors.node.labels | object | `{}` | k8s label selectors for node discovery | +| discoverer.discoverer.selectors.node_metrics | object | `{"fields":{},"labels":{}}` | k8s resource selectors for node_metrics discovery | +| discoverer.discoverer.selectors.node_metrics.fields | object | `{}` | k8s field selectors for node_metrics discovery | +| discoverer.discoverer.selectors.node_metrics.labels | object | `{}` | k8s label selectors for node_metrics discovery | +| discoverer.discoverer.selectors.pod | object | `{"fields":{},"labels":{}}` | k8s resource selectors for pod discovery | +| discoverer.discoverer.selectors.pod.fields | object | `{}` | k8s field selectors for pod discovery | +| discoverer.discoverer.selectors.pod.labels | object | `{}` | k8s label selectors for pod discovery | +| discoverer.discoverer.selectors.pod_metrics | object | `{"fields":{},"labels":{}}` | k8s resource selectors for pod_metrics discovery | +| discoverer.discoverer.selectors.pod_metrics.fields | object | `{}` | k8s field selectors for pod_metrics discovery | +| discoverer.discoverer.selectors.pod_metrics.labels | object | `{}` | k8s label selectors for pod_metrics discovery | +| discoverer.discoverer.selectors.service | object | `{"fields":{},"labels":{}}` | k8s resource selectors for service discovery | +| discoverer.discoverer.selectors.service.fields | object | `{}` | k8s field selectors for service discovery | +| discoverer.discoverer.selectors.service.labels | object | `{}` | k8s label selectors for service discovery | +| discoverer.enabled | bool | `true` | discoverer enabled | +| discoverer.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| discoverer.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| discoverer.hpa.enabled | bool | `false` | HPA enabled | +| discoverer.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| discoverer.image.pullPolicy | string | `"Always"` | image pull policy | +| discoverer.image.repository | string | `"vdaas/vald-discoverer-k8s"` | image repository | +| discoverer.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| discoverer.initContainers | list | `[]` | init containers | +| discoverer.internalTrafficPolicy | string | `""` | internal traffic policy : Cluster or Local | +| discoverer.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| discoverer.logging | object | `{}` | logging config (overrides defaults.logging) | +| 
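
The discoverer rows above (discovery duration and k8s selectors) are often narrowed per cluster; a minimal sketch is shown below, where the label key and value used in the selector are an assumption and should match whatever your pods actually carry.

```yaml
# Hypothetical values override: slow down discovery and restrict pod discovery by label.
discoverer:
  discoverer:
    discovery_duration: "5s"      # duration to discovery (chart default is "3s")
    selectors:
      pod:
        labels:
          app.kubernetes.io/part-of: vald  # assumed label selector for pod discovery
```
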
discoverer.maxReplicas | int | `2` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| discoverer.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | +| discoverer.minReplicas | int | `1` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | +| discoverer.name | string | `"vald-discoverer"` | name of discoverer deployment | +| discoverer.nodeName | string | `""` | node name | +| discoverer.nodeSelector | object | `{}` | node selector | +| discoverer.observability | object | `{"otlp":{"attribute":{"service_name":"vald-discoverer"}}}` | observability config (overrides defaults.observability) | +| discoverer.podAnnotations | object | `{}` | pod annotations | +| discoverer.podPriority.enabled | bool | `true` | discoverer pod PriorityClass enabled | +| discoverer.podPriority.value | int | `1000000` | discoverer pod PriorityClass value | +| discoverer.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| discoverer.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| discoverer.resources | object | `{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}}` | compute resources | +| discoverer.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| discoverer.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| discoverer.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| discoverer.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| discoverer.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| discoverer.service.annotations | object | `{}` | service annotations | +| discoverer.service.labels | object | `{}` | service labels | +| discoverer.serviceAccount.enabled | bool | `true` | creates service account | +| discoverer.serviceAccount.name | string | `"vald"` | name of service account | +| discoverer.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| discoverer.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| discoverer.time_zone | string | `""` | Time zone | +| discoverer.tolerations | list | `[]` | tolerations | +| discoverer.topologySpreadConstraints | list | `[]` | topology spread constraints of discoverer pods | +| discoverer.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
| +| discoverer.version | string | `"v0.0.0"` | version of discoverer config | +| discoverer.volumeMounts | list | `[]` | volume mounts | +| discoverer.volumes | list | `[]` | volumes | +| gateway.filter.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| gateway.filter.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| gateway.filter.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| gateway.filter.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| gateway.filter.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-filter-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| gateway.filter.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| gateway.filter.annotations | object | `{}` | deployment annotations | +| gateway.filter.enabled | bool | `false` | gateway enabled | +| gateway.filter.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| gateway.filter.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.filter.gateway_config.egress_filter | object | `{"client":{},"distance_filters":[],"object_filters":[]}` | gRPC client config for egress filter | +| gateway.filter.gateway_config.egress_filter.client | object | `{}` | gRPC client config for egress filter (overrides defaults.grpc.client) | +| gateway.filter.gateway_config.egress_filter.distance_filters | list | `[]` | distance egress vector filter targets | +| gateway.filter.gateway_config.egress_filter.object_filters | list | `[]` | object egress vector filter targets | +| gateway.filter.gateway_config.gateway_client | object | `{}` | gRPC client for next gateway (overrides defaults.grpc.client) | +| gateway.filter.gateway_config.ingress_filter | object | `{"client":{},"insert_filters":[],"search_filters":[],"update_filters":[],"upsert_filters":[],"vectorizer":""}` | gRPC client config for ingress filter | +| gateway.filter.gateway_config.ingress_filter.client | object | `{}` | gRPC client for ingress filter (overrides defaults.grpc.client) | +| gateway.filter.gateway_config.ingress_filter.insert_filters | list | `[]` | insert ingress vector filter targets | +| gateway.filter.gateway_config.ingress_filter.search_filters | list | `[]` | search ingress vector filter targets | +| gateway.filter.gateway_config.ingress_filter.update_filters | list | `[]` | update ingress vector filter targets | +| gateway.filter.gateway_config.ingress_filter.upsert_filters | list | `[]` | upsert ingress vector filter targets | +| gateway.filter.gateway_config.ingress_filter.vectorizer | string | `""` | object ingress vectorize filter targets | +| gateway.filter.hpa.enabled | bool | `true` | HPA enabled | +| 
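
To make the `gateway.filter.*` rows above concrete, the sketch below enables the filter gateway and points the ingress vectorizer at an external service; the target address and its `host:port` format are assumptions, so check the filter configuration docs for the exact schema of the filter target lists, which are left empty here.

```yaml
# Hypothetical values override: enable the filter gateway with an ingress vectorizer target.
gateway:
  filter:
    enabled: true                 # gateway.filter.enabled (chart default is false)
    gateway_config:
      ingress_filter:
        vectorizer: "vectorizer.default.svc.cluster.local:8081"  # assumed host:port target
        search_filters: []        # search ingress vector filter targets (left empty)
      egress_filter:
        distance_filters: []      # distance egress vector filter targets (left empty)
```
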
gateway.filter.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| gateway.filter.image.pullPolicy | string | `"Always"` | image pull policy | +| gateway.filter.image.repository | string | `"vdaas/vald-filter-gateway"` | image repository | +| gateway.filter.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| gateway.filter.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | +| gateway.filter.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | +| gateway.filter.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | +| gateway.filter.ingress.enabled | bool | `false` | gateway ingress enabled | +| gateway.filter.ingress.host | string | `"filter.gateway.vald.vdaas.org"` | ingress hostname | +| gateway.filter.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | +| gateway.filter.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | +| gateway.filter.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-gateway-lb","sleepDuration":2,"target":"gateway-lb","type":"wait-for"}]` | init containers | +| gateway.filter.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.filter.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| gateway.filter.logging | object | `{}` | logging config (overrides defaults.logging) | +| gateway.filter.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| gateway.filter.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | +| gateway.filter.minReplicas | int | `3` | minimum number of replicas. 
if HPA is disabled, the replicas will be set to this value | +| gateway.filter.name | string | `"vald-filter-gateway"` | name of filter gateway deployment | +| gateway.filter.nodeName | string | `""` | node name | +| gateway.filter.nodeSelector | object | `{}` | node selector | +| gateway.filter.observability | object | `{"otlp":{"attribute":{"service_name":"vald-filter-gateway"}}}` | observability config (overrides defaults.observability) | +| gateway.filter.podAnnotations | object | `{}` | pod annotations | +| gateway.filter.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | +| gateway.filter.podPriority.value | int | `1000000` | gateway pod PriorityClass value | +| gateway.filter.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| gateway.filter.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| gateway.filter.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | +| gateway.filter.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| gateway.filter.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| gateway.filter.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| gateway.filter.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| gateway.filter.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| gateway.filter.service.annotations | object | `{}` | service annotations | +| gateway.filter.service.labels | object | `{}` | service labels | +| gateway.filter.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| gateway.filter.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| gateway.filter.time_zone | string | `""` | Time zone | +| gateway.filter.tolerations | list | `[]` | tolerations | +| gateway.filter.topologySpreadConstraints | list | `[]` | topology spread constraints of gateway pods | +| gateway.filter.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
| +| gateway.filter.version | string | `"v0.0.0"` | version of gateway config | +| gateway.filter.volumeMounts | list | `[]` | volume mounts | +| gateway.filter.volumes | list | `[]` | volumes | +| gateway.lb.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| gateway.lb.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| gateway.lb.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| gateway.lb.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| gateway.lb.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-lb-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| gateway.lb.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| gateway.lb.annotations | object | `{}` | deployment annotations | +| gateway.lb.enabled | bool | `true` | gateway enabled | +| gateway.lb.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| gateway.lb.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.lb.gateway_config.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | agent namespace | +| gateway.lb.gateway_config.discoverer.agent_client_options | object | `{}` | gRPC client options for agents (overrides defaults.grpc.client) | +| gateway.lb.gateway_config.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| gateway.lb.gateway_config.discoverer.duration | string | `"200ms"` | | +| gateway.lb.gateway_config.discoverer.read_client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| gateway.lb.gateway_config.index_replica | int | `3` | number of index replica | +| gateway.lb.gateway_config.multi_operation_concurrency | int | `20` | number of concurrency of multiXXX api's operation | +| gateway.lb.gateway_config.node_name | string | `""` | node name | +| gateway.lb.hpa.enabled | bool | `true` | HPA enabled | +| gateway.lb.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| gateway.lb.image.pullPolicy | string | `"Always"` | image pull policy | +| gateway.lb.image.repository | string | `"vdaas/vald-lb-gateway"` | image repository | +| gateway.lb.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| gateway.lb.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | +| gateway.lb.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | +| gateway.lb.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | +| gateway.lb.ingress.enabled | bool | `false` | gateway ingress enabled | +| gateway.lb.ingress.host | string | 
`"lb.gateway.vald.vdaas.org"` | ingress hostname | +| gateway.lb.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | +| gateway.lb.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | +| gateway.lb.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"},{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"}]` | init containers | +| gateway.lb.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.lb.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| gateway.lb.logging | object | `{}` | logging config (overrides defaults.logging) | +| gateway.lb.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| gateway.lb.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | +| gateway.lb.minReplicas | int | `3` | minimum number of replicas. if HPA is disabled, the replicas will be set to this value | +| gateway.lb.name | string | `"vald-lb-gateway"` | name of gateway deployment | +| gateway.lb.nodeName | string | `""` | node name | +| gateway.lb.nodeSelector | object | `{}` | node selector | +| gateway.lb.observability | object | `{"otlp":{"attribute":{"service_name":"vald-lb-gateway"}}}` | observability config (overrides defaults.observability) | +| gateway.lb.podAnnotations | object | `{}` | pod annotations | +| gateway.lb.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | +| gateway.lb.podPriority.value | int | `1000000` | gateway pod PriorityClass value | +| gateway.lb.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| gateway.lb.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| gateway.lb.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | +| gateway.lb.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| gateway.lb.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| gateway.lb.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| gateway.lb.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| gateway.lb.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| gateway.lb.service.annotations | object | `{}` | service annotations | +| gateway.lb.service.labels | object | `{}` | service labels | +| gateway.lb.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| gateway.lb.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| gateway.lb.time_zone | string | `""` | Time zone | +| gateway.lb.tolerations | list | `[]` | tolerations | +| gateway.lb.topologySpreadConstraints | list | `[]` | topology spread 
constraints of gateway pods | +| gateway.lb.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. | +| gateway.lb.version | string | `"v0.0.0"` | version of gateway config | +| gateway.lb.volumeMounts | list | `[]` | volume mounts | +| gateway.lb.volumes | list | `[]` | volumes | +| gateway.mirror.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| gateway.mirror.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| gateway.mirror.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| gateway.mirror.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| gateway.mirror.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-mirror-gateway"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| gateway.mirror.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| gateway.mirror.annotations | object | `{}` | deployment annotations | +| gateway.mirror.clusterRole.enabled | bool | `true` | creates clusterRole resource | +| gateway.mirror.clusterRole.name | string | `"gateway-mirror"` | name of clusterRole | +| gateway.mirror.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | +| gateway.mirror.clusterRoleBinding.name | string | `"gateway-mirror"` | name of clusterRoleBinding | +| gateway.mirror.enabled | bool | `false` | gateway enabled | +| gateway.mirror.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| gateway.mirror.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.mirror.gateway_config.client | object | `{}` | gRPC client (overrides defaults.grpc.client) | +| gateway.mirror.gateway_config.colocation | string | `"dc1"` | colocation name | +| gateway.mirror.gateway_config.discovery_duration | string | `"1s"` | duration to discovery | +| gateway.mirror.gateway_config.gateway_addr | string | `""` | address for lb-gateway | +| gateway.mirror.gateway_config.group | string | `""` | mirror group name | +| gateway.mirror.gateway_config.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | +| gateway.mirror.gateway_config.net.dialer.dual_stack_enabled | bool | `false` | TCP dialer dual stack enabled | +| gateway.mirror.gateway_config.net.dialer.keepalive | string | `"10m"` | TCP dialer keep alive | +| gateway.mirror.gateway_config.net.dialer.timeout | string | `"30s"` | TCP dialer timeout | +| gateway.mirror.gateway_config.net.dns.cache_enabled | bool | `true` | DNS cache enabled | +| gateway.mirror.gateway_config.net.dns.cache_expiration | string | `"24h"` | DNS cache expiration | +| 
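
A hedged sketch of the mirror-gateway keys listed above, enabling the component and naming its colocation and mirror group; the group name is a placeholder and must match the peer clusters.

```yaml
# Hypothetical values override: enable the mirror gateway for a multi-cluster setup.
gateway:
  mirror:
    enabled: true                 # gateway.mirror.enabled (chart default is false)
    gateway_config:
      colocation: "dc1"           # colocation name of this cluster
      group: "mirror-group-1"     # mirror group name shared with peer clusters (placeholder)
      gateway_addr: ""            # address for lb-gateway (left at the chart default)
```
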
gateway.mirror.gateway_config.net.dns.refresh_duration | string | `"5m"` | DNS cache refresh duration | +| gateway.mirror.gateway_config.net.network | string | `"tcp"` | | +| gateway.mirror.gateway_config.net.socket_option.ip_recover_destination_addr | bool | `false` | server listen socket option for ip_recover_destination_addr functionality | +| gateway.mirror.gateway_config.net.socket_option.ip_transparent | bool | `false` | server listen socket option for ip_transparent functionality | +| gateway.mirror.gateway_config.net.socket_option.reuse_addr | bool | `true` | server listen socket option for reuse_addr functionality | +| gateway.mirror.gateway_config.net.socket_option.reuse_port | bool | `true` | server listen socket option for reuse_port functionality | +| gateway.mirror.gateway_config.net.socket_option.tcp_cork | bool | `false` | server listen socket option for tcp_cork functionality | +| gateway.mirror.gateway_config.net.socket_option.tcp_defer_accept | bool | `true` | server listen socket option for tcp_defer_accept functionality | +| gateway.mirror.gateway_config.net.socket_option.tcp_fast_open | bool | `true` | server listen socket option for tcp_fast_open functionality | +| gateway.mirror.gateway_config.net.socket_option.tcp_no_delay | bool | `true` | server listen socket option for tcp_no_delay functionality | +| gateway.mirror.gateway_config.net.socket_option.tcp_quick_ack | bool | `true` | server listen socket option for tcp_quick_ack functionality | +| gateway.mirror.gateway_config.net.tls.ca | string | `"/path/to/ca"` | TLS ca path | +| gateway.mirror.gateway_config.net.tls.cert | string | `"/path/to/cert"` | TLS cert path | +| gateway.mirror.gateway_config.net.tls.enabled | bool | `false` | TLS enabled | +| gateway.mirror.gateway_config.net.tls.insecure_skip_verify | bool | `false` | enable/disable skip SSL certificate verification | +| gateway.mirror.gateway_config.net.tls.key | string | `"/path/to/key"` | TLS key path | +| gateway.mirror.gateway_config.pod_name | string | `"_MY_POD_NAME_"` | self mirror gateway pod name | +| gateway.mirror.gateway_config.register_duration | string | `"1s"` | duration to register mirror-gateway. 
| +| gateway.mirror.gateway_config.self_mirror_addr | string | `""` | address for self mirror-gateway | +| gateway.mirror.hpa.enabled | bool | `true` | HPA enabled | +| gateway.mirror.hpa.targetCPUUtilizationPercentage | int | `80` | HPA CPU utilization percentage | +| gateway.mirror.image.pullPolicy | string | `"Always"` | image pull policy | +| gateway.mirror.image.repository | string | `"vdaas/vald-mirror-gateway"` | image repository | +| gateway.mirror.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| gateway.mirror.ingress.annotations | object | `{"nginx.ingress.kubernetes.io/grpc-backend":"true"}` | annotations for ingress | +| gateway.mirror.ingress.defaultBackend | object | `{"enabled":true}` | defaultBackend config | +| gateway.mirror.ingress.defaultBackend.enabled | bool | `true` | gateway ingress defaultBackend enabled | +| gateway.mirror.ingress.enabled | bool | `false` | gateway ingress enabled | +| gateway.mirror.ingress.host | string | `"mirror.gateway.vald.vdaas.org"` | ingress hostname | +| gateway.mirror.ingress.pathType | string | `"ImplementationSpecific"` | gateway ingress pathType | +| gateway.mirror.ingress.servicePort | string | `"grpc"` | service port to be exposed by ingress | +| gateway.mirror.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-gateway-lb","sleepDuration":2,"target":"gateway-lb","type":"wait-for"}]` | init containers | +| gateway.mirror.internalTrafficPolicy | string | `""` | internal traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| gateway.mirror.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| gateway.mirror.logging | object | `{}` | logging config (overrides defaults.logging) | +| gateway.mirror.maxReplicas | int | `9` | maximum number of replicas. if HPA is disabled, this value will be ignored. | +| gateway.mirror.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | +| gateway.mirror.minReplicas | int | `3` | minimum number of replicas. 
if HPA is disabled, the replicas will be set to this value | +| gateway.mirror.name | string | `"vald-mirror-gateway"` | name of gateway deployment | +| gateway.mirror.nodeName | string | `""` | node name | +| gateway.mirror.nodeSelector | object | `{}` | node selector | +| gateway.mirror.observability | object | `{"otlp":{"attribute":{"service_name":"vald-mirror-gateway"}}}` | observability config (overrides defaults.observability) | +| gateway.mirror.podAnnotations | object | `{}` | pod annotations | +| gateway.mirror.podPriority.enabled | bool | `true` | gateway pod PriorityClass enabled | +| gateway.mirror.podPriority.value | int | `1000000` | gateway pod PriorityClass value | +| gateway.mirror.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| gateway.mirror.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| gateway.mirror.resources | object | `{"limits":{"cpu":"2000m","memory":"700Mi"},"requests":{"cpu":"200m","memory":"150Mi"}}` | compute resources | +| gateway.mirror.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| gateway.mirror.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| gateway.mirror.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| gateway.mirror.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| gateway.mirror.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| gateway.mirror.service.annotations | object | `{}` | service annotations | +| gateway.mirror.service.labels | object | `{}` | service labels | +| gateway.mirror.serviceAccount.enabled | bool | `true` | creates service account | +| gateway.mirror.serviceAccount.name | string | `"gateway-mirror"` | name of service account | +| gateway.mirror.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| gateway.mirror.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| gateway.mirror.time_zone | string | `""` | Time zone | +| gateway.mirror.tolerations | list | `[]` | tolerations | +| gateway.mirror.topologySpreadConstraints | list | `[]` | topology spread constraints of gateway pods | +| gateway.mirror.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
| +| gateway.mirror.version | string | `"v0.0.0"` | version of gateway config | +| gateway.mirror.volumeMounts | list | `[]` | volume mounts | +| gateway.mirror.volumes | list | `[]` | volumes | +| manager.index.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| manager.index.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| manager.index.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| manager.index.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| manager.index.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity preferred scheduling terms | +| manager.index.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| manager.index.annotations | object | `{}` | deployment annotations | +| manager.index.corrector.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| manager.index.corrector.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| manager.index.corrector.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| manager.index.corrector.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| manager.index.corrector.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity preferred scheduling terms | +| manager.index.corrector.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| manager.index.corrector.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | +| manager.index.corrector.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | +| manager.index.corrector.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| manager.index.corrector.discoverer.duration | string | `"500ms"` | refresh duration to discover | +| manager.index.corrector.enabled | bool | `false` | enable index correction CronJob | +| manager.index.corrector.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.corrector.gateway | object | `{}` | gRPC client for gateway (overrides defaults.grpc.client) | +| manager.index.corrector.image.pullPolicy | string | `"Always"` | | +| manager.index.corrector.image.repository | string | `"vdaas/vald-index-correction"` | image repository | +| manager.index.corrector.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.corrector.initContainers | list | 
`[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | +| manager.index.corrector.kvs_background_compaction_interval | string | `"5s"` | interval of checked id list kvs compaction | +| manager.index.corrector.kvs_background_sync_interval | string | `"5s"` | interval of checked id list kvs sync | +| manager.index.corrector.name | string | `"vald-index-correction"` | name of index correction job | +| manager.index.corrector.nodeSelector | object | `{}` | node selector | +| manager.index.corrector.node_name | string | `""` | node name | +| manager.index.corrector.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-correction"}}}` | observability config (overrides defaults.observability) | +| manager.index.corrector.schedule | string | `"6 3 * * *"` | CronJob schedule setting for index correction | +| manager.index.corrector.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.corrector.startingDeadlineSeconds | int | `86400` | startingDeadlineSeconds setting for K8s completed jobs | +| manager.index.corrector.stream_list_concurrency | int | `200` | concurrency for stream list object rpc | +| manager.index.corrector.suspend | bool | `false` | CronJob suspend setting for index correction | +| manager.index.corrector.tolerations | list | `[]` | tolerations | +| manager.index.corrector.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | +| manager.index.corrector.version | string | `"v0.0.0"` | version of index manager config | +| manager.index.creator.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| manager.index.creator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| manager.index.creator.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| manager.index.creator.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| manager.index.creator.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity preferred scheduling terms | +| manager.index.creator.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| manager.index.creator.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | +| manager.index.creator.concurrency | int | `1` | concurrency for indexing | +| manager.index.creator.creation_pool_size | int | `16` | number of pool size of create index processing | +| manager.index.creator.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | +| manager.index.creator.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| manager.index.creator.discoverer.duration | string | `"500ms"` | refresh duration to discover | +| 
manager.index.creator.enabled | bool | `false` | enable index creation CronJob | +| manager.index.creator.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.creator.image.pullPolicy | string | `"Always"` | | +| manager.index.creator.image.repository | string | `"vdaas/vald-index-creation"` | image repository | +| manager.index.creator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.creator.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | +| manager.index.creator.name | string | `"vald-index-creation"` | name of index creation job | +| manager.index.creator.nodeSelector | object | `{}` | node selector | +| manager.index.creator.node_name | string | `""` | node name | +| manager.index.creator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-creation"}}}` | observability config (overrides defaults.observability) | +| manager.index.creator.schedule | string | `"* * * * *"` | CronJob schedule setting for index creation | +| manager.index.creator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.creator.startingDeadlineSeconds | int | `43200` | startingDeadlineSeconds setting for K8s completed jobs | +| manager.index.creator.suspend | bool | `false` | CronJob suspend setting for index creation | +| manager.index.creator.target_addrs | list | `[]` | indexing target addresses | +| manager.index.creator.tolerations | list | `[]` | tolerations | +| manager.index.creator.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | +| manager.index.creator.version | string | `"v0.0.0"` | version of index manager config | +| manager.index.enabled | bool | `true` | index manager enabled | +| manager.index.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.externalTrafficPolicy | string | `""` | external traffic policy (can be specified when service type is LoadBalancer or NodePort) : Cluster or Local | +| manager.index.image.pullPolicy | string | `"Always"` | image pull policy | +| manager.index.image.repository | string | `"vdaas/vald-manager-index"` | image repository | +| manager.index.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.indexer.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | +| manager.index.indexer.auto_index_check_duration | string | `"1m"` | check duration of automatic indexing | +| manager.index.indexer.auto_index_duration_limit | string | `"30m"` | limit duration of automatic indexing | +| manager.index.indexer.auto_index_length | int | `100` | number of cache to trigger automatic indexing | +| 
manager.index.indexer.auto_save_index_duration_limit | string | `"3h"` | limit duration of automatic index saving | +| manager.index.indexer.auto_save_index_wait_duration | string | `"10m"` | duration of automatic index saving wait duration for next saving | +| manager.index.indexer.concurrency | int | `1` | concurrency | +| manager.index.indexer.creation_pool_size | int | `16` | number of pool size of create index processing | +| manager.index.indexer.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | +| manager.index.indexer.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| manager.index.indexer.discoverer.duration | string | `"500ms"` | refresh duration to discover | +| manager.index.indexer.node_name | string | `""` | node name | +| manager.index.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | +| manager.index.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| manager.index.logging | object | `{}` | logging config (overrides defaults.logging) | +| manager.index.maxUnavailable | string | `"50%"` | maximum number of unavailable replicas | +| manager.index.name | string | `"vald-manager-index"` | name of index manager deployment | +| manager.index.nodeName | string | `""` | node name | +| manager.index.nodeSelector | object | `{}` | node selector | +| manager.index.observability | object | `{"otlp":{"attribute":{"service_name":"vald-manager-index"}}}` | observability config (overrides defaults.observability) | +| manager.index.operator | object | 
`{"affinity":{"nodeAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[],"requiredDuringSchedulingIgnoredDuringExecution":{"nodeSelectorTerms":[]}},"podAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[],"requiredDuringSchedulingIgnoredDuringExecution":[]},"podAntiAffinity":{"preferredDuringSchedulingIgnoredDuringExecution":[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-index-operator"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}],"requiredDuringSchedulingIgnoredDuringExecution":[]}},"annotations":{},"enabled":false,"env":[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"image":{"pullPolicy":"Always","repository":"vdaas/vald-index-operator","tag":""},"initContainers":[],"kind":"Deployment","logging":{},"name":"vald-index-operator","namespace":"_MY_POD_NAMESPACE_","nodeName":"","nodeSelector":{},"observability":{"otlp":{"attribute":{"service_name":"vald-index-operator"}}},"podAnnotations":{},"podPriority":{"enabled":true,"value":1000000},"podSecurityContext":{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"progressDeadlineSeconds":600,"replicas":1,"resources":{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}},"revisionHistoryLimit":2,"rollingUpdate":{"maxSurge":"25%","maxUnavailable":"25%"},"rotation_job_concurrency":2,"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"server_config":{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}},"terminationGracePeriodSeconds":30,"time_zone":"","tolerations":[],"topologySpreadConstraints":[],"version":"v0.0.0","volumeMounts":[],"volumes":[]}` | [THIS FEATURE IS WIP] operator that manages vald index | +| manager.index.operator.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| manager.index.operator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| manager.index.operator.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| manager.index.operator.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| manager.index.operator.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[{"podAffinityTerm":{"labelSelector":{"matchExpressions":[{"key":"app","operator":"In","values":["vald-index-operator"]}]},"topologyKey":"kubernetes.io/hostname"},"weight":100}]` | pod anti-affinity preferred scheduling terms | +| manager.index.operator.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| manager.index.operator.annotations | object | `{}` | deployment annotations | +| manager.index.operator.enabled | bool | `false` | index operator enabled | +| manager.index.operator.env | list | 
`[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.operator.image.pullPolicy | string | `"Always"` | image pull policy | +| manager.index.operator.image.repository | string | `"vdaas/vald-index-operator"` | image repository | +| manager.index.operator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.operator.initContainers | list | `[]` | init containers | +| manager.index.operator.kind | string | `"Deployment"` | deployment kind: Deployment or DaemonSet | +| manager.index.operator.logging | object | `{}` | logging config (overrides defaults.logging) | +| manager.index.operator.name | string | `"vald-index-operator"` | name of manager.index.operator deployment | +| manager.index.operator.namespace | string | `"_MY_POD_NAMESPACE_"` | namespace to discovery | +| manager.index.operator.nodeName | string | `""` | node name | +| manager.index.operator.nodeSelector | object | `{}` | node selector | +| manager.index.operator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-operator"}}}` | observability config (overrides defaults.observability) | +| manager.index.operator.podAnnotations | object | `{}` | pod annotations | +| manager.index.operator.podPriority.enabled | bool | `true` | manager.index.operator pod PriorityClass enabled | +| manager.index.operator.podPriority.value | int | `1000000` | manager.index.operator pod PriorityClass value | +| manager.index.operator.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| manager.index.operator.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| manager.index.operator.replicas | int | `1` | number of replicas. | +| manager.index.operator.resources | object | `{"limits":{"cpu":"600m","memory":"200Mi"},"requests":{"cpu":"200m","memory":"65Mi"}}` | compute resources | +| manager.index.operator.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| manager.index.operator.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| manager.index.operator.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| manager.index.operator.rotation_job_concurrency | int | `2` | maximum concurrent rotator job run. 
| +| manager.index.operator.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| manager.index.operator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.operator.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| manager.index.operator.time_zone | string | `""` | Time zone | +| manager.index.operator.tolerations | list | `[]` | tolerations | +| manager.index.operator.topologySpreadConstraints | list | `[]` | topology spread constraints of manager.index.operator pods | +| manager.index.operator.version | string | `"v0.0.0"` | version of index operator config | +| manager.index.operator.volumeMounts | list | `[]` | volume mounts | +| manager.index.operator.volumes | list | `[]` | volumes | +| manager.index.podAnnotations | object | `{}` | pod annotations | +| manager.index.podPriority.enabled | bool | `true` | index manager pod PriorityClass enabled | +| manager.index.podPriority.value | int | `1000000` | index manager pod PriorityClass value | +| manager.index.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| manager.index.progressDeadlineSeconds | int | `600` | progress deadline seconds | +| manager.index.readreplica.rotator | object | `{"agent_namespace":"_MY_POD_NAMESPACE_","clusterRole":{"enabled":true,"name":"vald-readreplica-rotate"},"clusterRoleBinding":{"enabled":true,"name":"vald-readreplica-rotate"},"env":[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}],"image":{"pullPolicy":"Always","repository":"vdaas/vald-readreplica-rotate","tag":""},"initContainers":[],"name":"vald-readreplica-rotate","observability":{"otlp":{"attribute":{"service_name":"vald-readreplica-rotate"}}},"podSecurityContext":{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"securityContext":{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532},"server_config":{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}},"serviceAccount":{"enabled":true,"name":"vald-readreplica-rotate"},"target_read_replica_id_annotations_key":"vald.vdaas.org/target-read-replica-id","ttlSecondsAfterFinished":86400,"version":"v0.0.0"}` | [This feature is work in progress] readreplica agents rotation job | +| manager.index.readreplica.rotator.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | +| manager.index.readreplica.rotator.clusterRole.enabled | bool | `true` | creates clusterRole resource | +| manager.index.readreplica.rotator.clusterRole.name | string | `"vald-readreplica-rotate"` | name of clusterRole | +| manager.index.readreplica.rotator.clusterRoleBinding.enabled | bool | `true` | creates clusterRoleBinding resource | +| 
manager.index.readreplica.rotator.clusterRoleBinding.name | string | `"vald-readreplica-rotate"` | name of clusterRoleBinding | +| manager.index.readreplica.rotator.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.readreplica.rotator.image.repository | string | `"vdaas/vald-readreplica-rotate"` | image repository | +| manager.index.readreplica.rotator.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.readreplica.rotator.initContainers | list | `[]` | init containers | +| manager.index.readreplica.rotator.name | string | `"vald-readreplica-rotate"` | name of readreplica rotator job | +| manager.index.readreplica.rotator.observability | object | `{"otlp":{"attribute":{"service_name":"vald-readreplica-rotate"}}}` | observability config (overrides defaults.observability) | +| manager.index.readreplica.rotator.podSecurityContext | object | `{"fsGroup":65532,"fsGroupChangePolicy":"OnRootMismatch","runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for pod | +| manager.index.readreplica.rotator.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| manager.index.readreplica.rotator.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.readreplica.rotator.serviceAccount.enabled | bool | `true` | creates service account | +| manager.index.readreplica.rotator.serviceAccount.name | string | `"vald-readreplica-rotate"` | name of service account | +| manager.index.readreplica.rotator.target_read_replica_id_annotations_key | string | `"vald.vdaas.org/target-read-replica-id"` | name of annotations key for target read replica id | +| manager.index.readreplica.rotator.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | +| manager.index.readreplica.rotator.version | string | `"v0.0.0"` | version of readreplica rotator config | +| manager.index.replicas | int | `1` | number of replicas | +| manager.index.resources | object | `{"limits":{"cpu":"1000m","memory":"500Mi"},"requests":{"cpu":"200m","memory":"80Mi"}}` | compute resources | +| manager.index.revisionHistoryLimit | int | `2` | number of old history to retain to allow rollback | +| manager.index.rollingUpdate.maxSurge | string | `"25%"` | max surge of rolling update | +| manager.index.rollingUpdate.maxUnavailable | string | `"25%"` | max unavailable of rolling update | +| manager.index.saver.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | node affinity preferred scheduling terms | +| manager.index.saver.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms | list | `[]` | node affinity required node selectors | +| manager.index.saver.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity preferred scheduling terms | +| manager.index.saver.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod affinity required scheduling terms | +| 
manager.index.saver.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity preferred scheduling terms | +| manager.index.saver.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution | list | `[]` | pod anti-affinity required scheduling terms | +| manager.index.saver.agent_namespace | string | `"_MY_POD_NAMESPACE_"` | namespace of agent pods to manage | +| manager.index.saver.concurrency | int | `1` | concurrency for index saving | +| manager.index.saver.discoverer.agent_client_options | object | `{"dial_option":{"net":{"dialer":{"keepalive":"15m"}}}}` | gRPC client options for agents (overrides defaults.grpc.client) | +| manager.index.saver.discoverer.client | object | `{}` | gRPC client for discoverer (overrides defaults.grpc.client) | +| manager.index.saver.discoverer.duration | string | `"500ms"` | refresh duration to discover | +| manager.index.saver.enabled | bool | `false` | enable index save CronJob | +| manager.index.saver.env | list | `[{"name":"MY_NODE_NAME","valueFrom":{"fieldRef":{"fieldPath":"spec.nodeName"}}},{"name":"MY_POD_NAME","valueFrom":{"fieldRef":{"fieldPath":"metadata.name"}}},{"name":"MY_POD_NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | environment variables | +| manager.index.saver.image.pullPolicy | string | `"Always"` | | +| manager.index.saver.image.repository | string | `"vdaas/vald-index-save"` | image repository | +| manager.index.saver.image.tag | string | `""` | image tag (overrides defaults.image.tag) | +| manager.index.saver.initContainers | list | `[{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-agent","sleepDuration":2,"target":"agent","type":"wait-for"},{"image":"busybox:stable","imagePullPolicy":"Always","name":"wait-for-discoverer","sleepDuration":2,"target":"discoverer","type":"wait-for"}]` | init containers | +| manager.index.saver.name | string | `"vald-index-save"` | name of index save job | +| manager.index.saver.nodeSelector | object | `{}` | node selector | +| manager.index.saver.node_name | string | `""` | node name | +| manager.index.saver.observability | object | `{"otlp":{"attribute":{"service_name":"vald-index-save"}}}` | observability config (overrides defaults.observability) | +| manager.index.saver.schedule | string | `"0 */3 * * *"` | CronJob schedule setting for index save | +| manager.index.saver.server_config | object | `{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.saver.startingDeadlineSeconds | int | `43200` | startingDeadlineSeconds setting for K8s completed jobs | +| manager.index.saver.suspend | bool | `false` | CronJob suspend setting for index save | +| manager.index.saver.target_addrs | list | `[]` | index saving target addresses | +| manager.index.saver.tolerations | list | `[]` | tolerations | +| manager.index.saver.ttlSecondsAfterFinished | int | `86400` | ttl setting for K8s completed jobs | +| manager.index.saver.version | string | `"v0.0.0"` | version of index manager config | +| manager.index.securityContext | object | `{"allowPrivilegeEscalation":false,"capabilities":{"drop":["ALL"]},"privileged":false,"readOnlyRootFilesystem":true,"runAsGroup":65532,"runAsNonRoot":true,"runAsUser":65532}` | security context for container | +| manager.index.server_config | object | 
`{"healths":{"liveness":{},"readiness":{},"startup":{}},"metrics":{"pprof":{}},"servers":{"grpc":{},"rest":{}}}` | server config (overrides defaults.server_config) | +| manager.index.service.annotations | object | `{}` | service annotations | +| manager.index.service.labels | object | `{}` | service labels | +| manager.index.serviceType | string | `"ClusterIP"` | service type: ClusterIP, LoadBalancer or NodePort | +| manager.index.terminationGracePeriodSeconds | int | `30` | duration in seconds pod needs to terminate gracefully | +| manager.index.time_zone | string | `""` | Time zone | +| manager.index.tolerations | list | `[]` | tolerations | +| manager.index.topologySpreadConstraints | list | `[]` | topology spread constraints of index manager pods | +| manager.index.unhealthyPodEvictionPolicy | string | `"AlwaysAllow"` | controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. | +| manager.index.version | string | `"v0.0.0"` | version of index manager config | +| manager.index.volumeMounts | list | `[]` | volume mounts | +| manager.index.volumes | list | `[]` | volumes | diff --git a/charts/vald/crds/valdmirrortarget.yaml b/charts/vald/crds/valdmirrortarget.yaml index 91131bd3ae..1909ab7886 100644 --- a/charts/vald/crds/valdmirrortarget.yaml +++ b/charts/vald/crds/valdmirrortarget.yaml @@ -81,20 +81,20 @@ spec: lastTransitionTime: type: string spec: - type: object properties: colocation: type: string target: - type: object properties: host: - type: string minLength: 1 + type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer required: - host - port + type: object + type: object diff --git a/charts/vald/templates/_helpers.tpl b/charts/vald/templates/_helpers.tpl index 2edc19e03c..11730a90ab 100755 --- a/charts/vald/templates/_helpers.tpl +++ b/charts/vald/templates/_helpers.tpl @@ -699,6 +699,7 @@ initContainers {{- if .type }} - name: {{ .name }} image: {{ .image }} + imagePullPolicy: {{ .imagePullPolicy }} {{- if eq .type "wait-for" }} command: - /bin/sh @@ -895,6 +896,10 @@ spec: {{- toYaml .Job.securityContext | nindent 12 }} {{- end }} {{- end }} + {{- if .Job.affinity }} + affinity: + {{- include "vald.affinity" .Job.affinity | nindent 8 }} + {{- end }} containers: - name: {{ .Job.name }} image: "{{ .Job.image.repository }}:{{ default .default.Values.defaults.image.tag .Job.image.tag }}" @@ -928,6 +933,14 @@ spec: configMap: defaultMode: 420 name: {{ .Job.name }}-config + {{- if .Job.nodeSelector }} + nodeSelector: + {{- toYaml .Job.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Job.tolerations }} + tolerations: + {{- toYaml .Job.tolerations | nindent 8 }} + {{- end }} {{- if .Job.serviceAccount }} serviceAccountName: {{ .Job.serviceAccount.name }} {{- end }} diff --git a/charts/vald/templates/agent/pdb.yaml b/charts/vald/templates/agent/pdb.yaml index ea1ca29fe7..a990f440ae 100644 --- a/charts/vald/templates/agent/pdb.yaml +++ b/charts/vald/templates/agent/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $agent.name }} + unhealthyPodEvictionPolicy: {{ $agent.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/templates/discoverer/pdb.yaml b/charts/vald/templates/discoverer/pdb.yaml index 3577f6fa4e..9e86d39989 100644 --- a/charts/vald/templates/discoverer/pdb.yaml +++ b/charts/vald/templates/discoverer/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $discoverer.name }} + unhealthyPodEvictionPolicy: {{ 
$discoverer.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/templates/gateway/filter/pdb.yaml b/charts/vald/templates/gateway/filter/pdb.yaml index 6c49c87768..185c66c8f6 100644 --- a/charts/vald/templates/gateway/filter/pdb.yaml +++ b/charts/vald/templates/gateway/filter/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $gateway.name }} + unhealthyPodEvictionPolicy: {{ $gateway.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/templates/gateway/ing.yaml b/charts/vald/templates/gateway/ing.yaml index 1e38f1307a..a76880a0f7 100644 --- a/charts/vald/templates/gateway/ing.yaml +++ b/charts/vald/templates/gateway/ing.yaml @@ -13,14 +13,17 @@ # See the License for the specific language governing permissions and # limitations under the License. # -{{- $filter := .Values.gateway.filter -}} -{{- $filterIngEnabled := and $filter.enabled $filter.ingress.enabled -}} -{{- $mirror := .Values.gateway.mirror -}} -{{- $mirrorIngEnabled := and $mirror.enabled $mirror.ingress.enabled -}} -{{- $lb := .Values.gateway.lb -}} -{{- $lbIngEnabled := and $lb.enabled $lb.ingress.enabled -}} -{{- $gateway := "" -}} -{{- $gatewayName := "" -}} +{{- $reflectionEnabled := .Values.defaults.server_config.servers.grpc.server.grpc.enable_reflection -}} +{{- $filter := .Values.gateway.filter -}} +{{- $filterIngEnabled := and $filter.enabled $filter.ingress.enabled -}} +{{- $filterReflectionEnabled := and $filterIngEnabled (default $reflectionEnabled $filter.server_config.servers.grpc.enable_reflection) -}} +{{- $mirror := .Values.gateway.mirror -}} +{{- $mirrorIngEnabled := and $mirror.enabled $mirror.ingress.enabled -}} +{{- $lb := .Values.gateway.lb -}} +{{- $lbIngEnabled := and $lb.enabled $lb.ingress.enabled -}} +{{- $lbReflectionEnabled := and $lbIngEnabled (default $reflectionEnabled $lb.server_config.servers.grpc.enable_reflection) -}} +{{- $gateway := "" -}} +{{- $gatewayName := "" -}} {{- if or $filterIngEnabled $mirrorIngEnabled $lbIngEnabled }} {{- if $filterIngEnabled }} {{- $gateway = $filter -}} @@ -62,125 +65,253 @@ spec: - host: {{ $gateway.ingress.host }} http: paths: - {{- if and $mirrorIngEnabled $filterIngEnabled $lb.enabled }} - - path: "/vald.v1.Search" + {{- if and $mirrorIngEnabled $filterIngEnabled $lb.enabled }} + - path: "/vald.v1.Search/" backend: service: name: {{ $filter.name }} {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} pathType: {{ $filter.ingress.pathType }} - - path: "/vald.v1.Insert" + - path: "/vald.v1.Insert/" backend: service: name: {{ $filter.name }} {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} pathType: {{ $filter.ingress.pathType }} - - path: "/vald.v1.Update" + # NOTE: Change backend service to mirror after UpdateTimestamp is implemented in mirror. 
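+ # gRPC maps every call to the HTTP/2 path "/<package>.<Service>/<Method>", so the
+ # method-level rule below ("/vald.v1.Update/UpdateTimestamp") is meant to match ahead of
+ # the broader "/vald.v1.Update/" prefix that follows it. Whether the longer prefix
+ # actually takes precedence depends on the ingress controller, so verify the matching
+ # rules of the controller in use.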
+ - path: "/vald.v1.Update/UpdateTimestamp" + backend: + service: + name: {{ $lb.name }} + {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} + pathType: {{ $lb.ingress.pathType }} + - path: "/vald.v1.Update/" backend: service: name: {{ $filter.name }} {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} pathType: {{ $filter.ingress.pathType }} - - path: "/vald.v1.Upsert" + - path: "/vald.v1.Upsert/" backend: service: name: {{ $filter.name }} {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} pathType: {{ $filter.ingress.pathType }} - - path: "/vald.v1.Index" + - path: "/vald.v1.Remove/" + backend: + service: + name: {{ $mirror.name }} + {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} + pathType: {{ $mirror.ingress.pathType }} + - path: "/vald.v1.Object/Exists" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.Exists" + - path: "/vald.v1.Object/GetTimestamp" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.GetTimestamp" + - path: "/vald.v1.Object/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Index/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object" + # NOTE: Change backend service to mirror after Flush is implemented in mirror. 
+ - path: "/vald.v1.Flush/" backend: service: - name: {{ $filter.name }} + name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} - pathType: {{ $filter.ingress.pathType }} - - backend: + pathType: {{ $lb.ingress.pathType }} + - path: "/mirror.v1.Mirror/Register" + backend: service: name: {{ $mirror.name }} {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} pathType: {{ $mirror.ingress.pathType }} + - path: "/vald.v1.Filter/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} {{- else if and $filterIngEnabled $lb.enabled }} - - path: "/vald.v1.Index" + - path: "/vald.v1.Search/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Insert/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Update/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Upsert/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Remove/" + backend: + service: + name: {{ $lb.name }} + {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} + pathType: {{ $lb.ingress.pathType }} + - path: "/vald.v1.Index/" + backend: + service: + name: {{ $lb.name }} + {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} + pathType: {{ $lb.ingress.pathType }} + - path: "/vald.v1.Object/Exists" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.Exists" + - path: "/vald.v1.Object/GetTimestamp" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.GetTimestamp" + - path: "/vald.v1.Object/" + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + - path: "/vald.v1.Flush/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - backend: + - path: "/vald.v1.Filter/" + backend: service: name: {{ $filter.name }} {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} pathType: {{ $filter.ingress.pathType }} {{- else if and $mirrorIngEnabled $lb.enabled }} - - path: "/vald.v1.Search" + - path: "/vald.v1.Search/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Index" + - path: "/vald.v1.Insert/" + backend: + service: + name: {{ $mirror.name }} + {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} + pathType: {{ $mirror.ingress.pathType }} + # NOTE: Change backend service to mirror after UpdateTimestamp is implemented in mirror. 
+ - path: "/vald.v1.Update/UpdateTimestamp" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.Exists" + - path: "/vald.v1.Update/" + backend: + service: + name: {{ $mirror.name }} + {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} + pathType: {{ $mirror.ingress.pathType }} + - path: "/vald.v1.Upsert/" + backend: + service: + name: {{ $mirror.name }} + {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} + pathType: {{ $mirror.ingress.pathType }} + - path: "/vald.v1.Remove/" + backend: + service: + name: {{ $mirror.name }} + {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} + pathType: {{ $mirror.ingress.pathType }} + - path: "/vald.v1.Object/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object.GetTimestamp" + - path: "/vald.v1.Index/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - path: "/vald.v1.Object" + # NOTE: Change backend service to mirror after Flush is implemented in mirror. + - path: "/vald.v1.Flush/" backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} - - backend: + - path: "/mirror.v1.Mirror/Register" + backend: service: name: {{ $mirror.name }} {{- include "vald.ingressPort" (dict "Values" $mirror.ingress) | nindent 12 }} pathType: {{ $mirror.ingress.pathType }} {{- else if $lbIngEnabled }} - - backend: + - path: "/" + backend: + service: + name: {{ $lb.name }} + {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} + pathType: {{ $lb.ingress.pathType }} + {{- end }} + {{- if or $filterReflectionEnabled $lbReflectionEnabled }} + - path: "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" + {{- if $filterReflectionEnabled }} + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + {{- else }} + backend: + service: + name: {{ $lb.name }} + {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} + pathType: {{ $lb.ingress.pathType }} + {{- end }} + - path: "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" + {{- if $filterReflectionEnabled }} + backend: + service: + name: {{ $filter.name }} + {{- include "vald.ingressPort" (dict "Values" $filter.ingress) | nindent 12 }} + pathType: {{ $filter.ingress.pathType }} + {{- else }} + backend: service: name: {{ $lb.name }} {{- include "vald.ingressPort" (dict "Values" $lb.ingress) | nindent 12 }} pathType: {{ $lb.ingress.pathType }} + {{- end }} {{- end }} {{- end }} diff --git a/charts/vald/templates/gateway/lb/pdb.yaml b/charts/vald/templates/gateway/lb/pdb.yaml index c327dfc4f8..80c961634c 100644 --- a/charts/vald/templates/gateway/lb/pdb.yaml +++ b/charts/vald/templates/gateway/lb/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $gateway.name }} + unhealthyPodEvictionPolicy: {{ $gateway.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/templates/gateway/mirror/pdb.yaml b/charts/vald/templates/gateway/mirror/pdb.yaml index 762e9f7246..861cb72d92 100644 --- 
a/charts/vald/templates/gateway/mirror/pdb.yaml +++ b/charts/vald/templates/gateway/mirror/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $gateway.name }} + unhealthyPodEvictionPolicy: {{ $gateway.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/templates/manager/index/pdb.yaml b/charts/vald/templates/manager/index/pdb.yaml index 65138ba6d7..3fc5c046f4 100644 --- a/charts/vald/templates/manager/index/pdb.yaml +++ b/charts/vald/templates/manager/index/pdb.yaml @@ -35,4 +35,5 @@ spec: selector: matchLabels: app: {{ $index.name }} + unhealthyPodEvictionPolicy: {{ $index.unhealthyPodEvictionPolicy }} {{- end }} diff --git a/charts/vald/values.schema.json b/charts/vald/values.schema.json index d67182b679..ca25eaa809 100644 --- a/charts/vald/values.schema.json +++ b/charts/vald/values.schema.json @@ -160,12 +160,21 @@ "type": "string", "description": "a factor of load index timeout. timeout duration will be calculated by (index count to be loaded) * (factor)." }, - "m": { "type": "integer" }, + "m": { "type": "integer", "description": "m" }, "max_load_index_timeout": { "type": "string", "description": "maximum duration of load index timeout" }, - "metric_type": { "type": "string", "enum": ["innerproduct", "l2"] }, + "method_type": { + "type": "string", + "description": "method type it should be `ivfpq` or `binaryindex`", + "enum": ["ivfpq", "binaryindex"] + }, + "metric_type": { + "type": "string", + "description": "metric type it should be `innerproduct` or `l2`", + "enum": ["innerproduct", "l2"] + }, "min_load_index_timeout": { "type": "string", "description": "minimum duration of load index timeout" @@ -174,8 +183,11 @@ "type": "string", "description": "namespace of myself" }, - "nbits_per_idx": { "type": "integer" }, - "nlist": { "type": "integer" }, + "nbits_per_idx": { + "type": "integer", + "description": "nbits_per_idx" + }, + "nlist": { "type": "integer", "description": "nlist" }, "pod_name": { "type": "string", "description": "pod name of myself" @@ -1404,6 +1416,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -1466,6 +1482,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -1478,10 +1498,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -1965,18 +1997,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache 
refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -3079,6 +3116,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -3141,6 +3182,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -3153,10 +3198,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -3487,6 +3544,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", @@ -3602,9 +3664,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -3621,10 +3688,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -3642,7 +3717,7 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": ["TraceInterceptor", "MetricInterceptor"] } }, "keepalive": { @@ -3662,6 +3737,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -3695,18 +3778,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": 
"gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -3779,10 +3867,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -4681,6 +4777,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -4743,6 +4843,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -4755,10 +4859,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -5158,18 +5274,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -6267,6 +6388,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -6329,6 +6454,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -6341,10 +6470,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": 
"gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -6663,6 +6804,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", @@ -6854,9 +7000,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -6873,10 +7024,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -6894,7 +7053,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -6914,6 +7076,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -6947,18 +7117,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -7031,10 +7206,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -7180,9 +7363,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, 
"backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -7199,10 +7387,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -7220,7 +7416,7 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": ["TraceInterceptor", "MetricInterceptor"] } }, "keepalive": { @@ -7240,6 +7436,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -7273,18 +7477,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -7357,10 +7566,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -7498,9 +7715,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -7517,10 +7739,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -7538,7 +7768,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -7558,6 +7791,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client 
dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -7591,18 +7832,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -7675,10 +7921,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -8736,6 +8990,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -8798,6 +9056,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -8810,10 +9072,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -9125,6 +9399,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", @@ -9316,9 +9595,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -9335,10 +9619,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, 
"enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -9356,7 +9648,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -9376,6 +9671,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -9409,18 +9712,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -9493,10 +9801,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -9630,9 +9946,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -9649,10 +9970,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -9670,7 +9999,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -9690,6 +10022,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -9723,18 +10063,23 @@ "properties": { 
"cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -9807,10 +10152,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -9945,9 +10298,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -9964,10 +10322,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -9985,7 +10351,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -10005,6 +10374,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -10038,18 +10415,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -10122,10 +10504,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option 
timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -11170,6 +11560,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -11232,6 +11626,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -11244,10 +11642,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -11559,6 +11969,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", @@ -11769,9 +12184,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -11788,10 +12208,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -11809,7 +12237,7 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": ["TraceInterceptor", "MetricInterceptor"] } }, "keepalive": { @@ -11829,6 +12257,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -11862,18 +12298,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache 
expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -11946,10 +12387,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -12036,18 +12485,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -13118,6 +13572,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -13180,6 +13638,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -13192,10 +13654,22 @@ "type": "integer", "description": "gRPC server max send message size" }, - "read_buffer_size": { + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, + "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -13520,6 +13994,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", @@ -13607,6 +14086,61 @@ "corrector": { "type": "object", "properties": { + "affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "node affinity preferred scheduling terms", + "items": { "type": "object" } + }, + 
"requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "description": "node affinity required node selectors", + "items": { "type": "object" } + } + } + } + } + }, + "podAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity required scheduling terms", + "items": { "type": "object" } + } + } + }, + "podAntiAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity required scheduling terms", + "items": { "type": "object" } + } + } + } + } + }, "agent_namespace": { "type": "string", "description": "namespace of agent pods to manage" @@ -13706,9 +14240,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -13725,10 +14264,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -13746,7 +14293,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -13766,6 +14316,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -13799,18 +14357,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -13883,10 +14446,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": 
"gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -14020,9 +14591,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -14039,10 +14615,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -14060,7 +14644,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -14080,6 +14667,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -14113,18 +14708,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -14197,10 +14797,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -14349,9 +14957,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -14368,10 +14981,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC 
client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -14389,7 +15010,7 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": ["TraceInterceptor", "MetricInterceptor"] } }, "keepalive": { @@ -14409,6 +15030,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -14442,18 +15071,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -14526,10 +15160,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -14606,6 +15248,10 @@ "type": "string", "description": "name of index correction job" }, + "nodeSelector": { + "type": "object", + "description": "node selector" + }, "node_name": { "type": "string", "description": "node name" }, "observability": { "type": "object", @@ -15427,6 +16073,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -15489,6 +16139,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -15501,10 +16155,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -15804,6 +16470,11 @@ "type": "boolean", "description": "CronJob suspend setting for index correction" }, + "tolerations": { + "type": "array", + "description": "tolerations", + "items": { "type": "object" } + }, "ttlSecondsAfterFinished": { 
"type": "integer", "description": "ttl setting for K8s completed jobs" @@ -15818,6 +16489,61 @@ "creator": { "type": "object", "properties": { + "affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "node affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "description": "node affinity required node selectors", + "items": { "type": "object" } + } + } + } + } + }, + "podAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity required scheduling terms", + "items": { "type": "object" } + } + } + }, + "podAntiAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity required scheduling terms", + "items": { "type": "object" } + } + } + } + } + }, "agent_namespace": { "type": "string", "description": "namespace of agent pods to manage" @@ -15926,9 +16652,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -15945,10 +16676,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -15966,7 +16705,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -15986,6 +16728,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -16019,18 +16769,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + 
"type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -16103,10 +16858,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -16240,9 +17003,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -16259,10 +17027,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -16280,7 +17056,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -16300,6 +17079,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -16333,18 +17120,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -16417,10 +17209,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -16504,6 +17304,10 @@ "type": "string", "description": "name of index creation job" }, + "nodeSelector": { + "type": "object", + "description": "node selector" + }, "node_name": { "type": "string", "description": "node name" }, "observability": { "type": "object", 
@@ -17325,6 +18129,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -17387,6 +18195,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -17399,10 +18211,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -17702,6 +18526,11 @@ "description": "indexing target addresses", "items": { "type": "string" } }, + "tolerations": { + "type": "array", + "description": "tolerations", + "items": { "type": "object" } + }, "ttlSecondsAfterFinished": { "type": "integer", "description": "ttl setting for K8s completed jobs" @@ -17875,9 +18704,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -17894,10 +18728,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -17915,7 +18757,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -17935,6 +18780,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -17968,18 +18821,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ 
-18052,10 +18910,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -18189,9 +19055,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -18208,10 +19079,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -18229,7 +19108,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -18249,6 +19131,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -18282,18 +19172,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -18366,10 +19261,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -19590,6 +20493,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -19652,6 +20559,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, 
"max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -19664,10 +20575,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -20905,6 +21828,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -20967,6 +21894,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -20979,10 +21910,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -21333,6 +22276,61 @@ "saver": { "type": "object", "properties": { + "affinity": { + "type": "object", + "properties": { + "nodeAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "node affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "object", + "properties": { + "nodeSelectorTerms": { + "type": "array", + "description": "node affinity required node selectors", + "items": { "type": "object" } + } + } + } + } + }, + "podAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod affinity required scheduling terms", + "items": { "type": "object" } + } + } + }, + "podAntiAffinity": { + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity preferred scheduling terms", + "items": { "type": "object" } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "type": "array", + "description": "pod anti-affinity required scheduling terms", + "items": { "type": "object" } + } + } + } + } + }, "agent_namespace": { "type": "string", "description": "namespace of agent pods to manage" @@ -21437,9 +22435,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option 
authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -21456,10 +22459,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -21477,7 +22488,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -21497,6 +22511,14 @@ } } }, + "max_call_attempts": { + "type": "integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -21530,18 +22552,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -21614,10 +22641,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -21751,9 +22786,14 @@ } } }, + "content_subtype": { "type": "string" }, "dial_option": { "type": "object", "properties": { + "authority": { + "type": "string", + "description": "gRPC client dial option authority" + }, "backoff_base_delay": { "type": "string", "description": "gRPC client dial option base backoff delay" @@ -21770,10 +22810,18 @@ "type": "number", "description": "gRPC client dial option base backoff delay" }, + "disable_retry": { + "type": "boolean", + "description": "gRPC client dial option disables retry" + }, "enable_backoff": { "type": "boolean", "description": "gRPC client dial option backoff enabled" }, + "idle_timeout": { + "type": "string", + "description": "gRPC client dial option idle_timeout" + }, "initial_connection_window_size": { "type": "integer", "description": "gRPC client dial option initial connection window size" @@ -21791,7 +22839,10 @@ "description": "gRPC client interceptors", "items": { "type": "string", - "enum": ["TraceInterceptor"] + "enum": [ + "TraceInterceptor", + "MetricInterceptor" + ] } }, "keepalive": { @@ -21811,6 +22862,14 @@ } } }, + "max_call_attempts": { + "type": 
"integer", + "description": "gRPC client dial option number of max call attempts" + }, + "max_header_list_size": { + "type": "integer", + "description": "gRPC client dial option max header list size" + }, "max_msg_size": { "type": "integer", "description": "gRPC client dial option max message size" @@ -21844,18 +22903,23 @@ "properties": { "cache_enabled": { "type": "boolean", - "description": "gRPC client TCP DNS cache enabled" + "description": "gRPC client DNS cache enabled" }, "cache_expiration": { "type": "string", - "description": "gRPC client TCP DNS cache expiration" + "description": "gRPC client DNS cache expiration" }, "refresh_duration": { "type": "string", - "description": "gRPC client TCP DNS cache refresh duration" + "description": "gRPC client DNS cache refresh duration" } } }, + "network": { + "type": "string", + "description": "gRPC client dialer network type", + "enum": ["tcp", "udp", "unix"] + }, "socket_option": { "type": "object", "properties": { @@ -21928,10 +22992,18 @@ "type": "integer", "description": "gRPC client dial option read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC client dial option sharing write buffer" + }, "timeout": { "type": "string", "description": "gRPC client dial option timeout" }, + "user_agent": { + "type": "string", + "description": "gRPC client dial option user_agent" + }, "write_buffer_size": { "type": "integer", "description": "gRPC client dial option write buffer size" @@ -22015,6 +23087,10 @@ "type": "string", "description": "name of index save job" }, + "nodeSelector": { + "type": "object", + "description": "node selector" + }, "node_name": { "type": "string", "description": "node name" }, "observability": { "type": "object", @@ -22836,6 +23912,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -22898,6 +23978,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + "description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -22910,10 +23994,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -23213,6 +24309,11 @@ "description": "index saving target addresses", "items": { "type": "string" } }, + "tolerations": { + "type": "array", + "description": "tolerations", + "items": { "type": "object" } + }, "ttlSecondsAfterFinished": { "type": "integer", "description": "ttl setting for K8s completed jobs" @@ -23931,6 +25032,10 @@ "type": "boolean", "description": "gRPC server admin option" }, + "enable_channelz": { + "type": "boolean", + "description": "gRPC server channelz option" + }, "enable_reflection": { "type": "boolean", "description": "gRPC server reflection option" @@ -23993,6 +25098,10 @@ } } }, + "max_concurrent_streams": { + "type": "integer", + 
"description": "gRPC server max concurrent stream size" + }, "max_header_list_size": { "type": "integer", "description": "gRPC server max header list size" @@ -24005,10 +25114,22 @@ "type": "integer", "description": "gRPC server max send message size" }, + "num_stream_workers": { + "type": "integer", + "description": "gRPC server number of stream workers" + }, "read_buffer_size": { "type": "integer", "description": "gRPC server read buffer size" }, + "shared_write_buffer": { + "type": "boolean", + "description": "gRPC server write buffer sharing option" + }, + "wait_for_handlers": { + "type": "boolean", + "description": "gRPC server wait for handlers when stop" + }, "write_buffer_size": { "type": "integer", "description": "gRPC server write buffer size" @@ -24320,6 +25441,11 @@ "description": "topology spread constraints of gateway pods", "items": { "type": "object" } }, + "unhealthyPodEvictionPolicy": { + "type": "string", + "description": "controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction.", + "enum": ["AlwaysAllow", "IfHealthyBudget"] + }, "version": { "type": "string", "description": "version of gateway config", diff --git a/charts/vald/values.yaml b/charts/vald/values.yaml index 8c8dbd3bd0..d6049c643a 100644 --- a/charts/vald/values.yaml +++ b/charts/vald/values.yaml @@ -38,7 +38,7 @@ defaults: image: # @schema {"name": "defaults.image.tag", "type": "string"} # defaults.image.tag -- docker image tag - tag: v1.7.13 + tag: v1.7.16 # @schema {"name": "defaults.server_config", "type": "object", "anchor": "server_config"} server_config: # @schema {"name": "defaults.server_config.servers", "type": "object"} @@ -238,16 +238,31 @@ defaults: # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.header_table_size", "type": "integer"} # defaults.server_config.servers.grpc.server.grpc.header_table_size -- gRPC server header table size header_table_size: 0 + # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.max_concurrent_streams", "type": "integer"} + # defaults.server_config.servers.grpc.server.grpc.max_concurrent_streams -- gRPC server max concurrent stream size + max_concurrent_streams: 0 + # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.num_stream_workers", "type": "integer"} + # defaults.server_config.servers.grpc.server.grpc.num_stream_workers -- gRPC server number of stream workers + num_stream_workers: 0 # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.interceptors", "type": "array", "items": {"type": "string", "enum": ["RecoverInterceptor", "AccessLogInterceptor", "TraceInterceptor", "MetricInterceptor"]}} # defaults.server_config.servers.grpc.server.grpc.interceptors -- gRPC server interceptors interceptors: - "RecoverInterceptor" + # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.shared_write_buffer", "type": "boolean"} + # defaults.server_config.servers.grpc.server.grpc.shared_write_buffer -- gRPC server write buffer sharing option + shared_write_buffer: false + # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.wait_for_handlers", "type": "boolean"} + # defaults.server_config.servers.grpc.server.grpc.wait_for_handlers -- gRPC server wait for handlers when stop + wait_for_handlers: true # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.enable_reflection", "type": "boolean"} # defaults.server_config.servers.grpc.server.grpc.enable_reflection -- gRPC server reflection 
option enable_reflection: true # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.enable_admin", "type": "boolean"} # defaults.server_config.servers.grpc.server.grpc.enable_admin -- gRPC server admin option enable_admin: true + # @schema {"name": "defaults.server_config.servers.grpc.server.grpc.enable_channelz", "type": "boolean"} + # defaults.server_config.servers.grpc.server.grpc.enable_channelz -- gRPC server channelz option + enable_channelz: true # @schema {"name": "defaults.server_config.servers.grpc.server.socket_option", "alias": "socket_option"} socket_option: # defaults.server_config.servers.grpc.server.socket_option.reuse_port -- server listen socket option for reuse_port functionality @@ -530,7 +545,7 @@ defaults: reuse_port: true # defaults.server_config.healths.readiness.server.socket_option.reuse_addr -- server listen socket option for reuse_addr functionality reuse_addr: true - # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_oepn -- server listen socket option for tcp_fast_open functionality + # defaults.server_config.healths.readiness.server.socket_option.tcp_fast_open -- server listen socket option for tcp_fast_open functionality tcp_fast_open: true # defaults.server_config.healths.readiness.server.socket_option.tcp_no_delay -- server listen socket option for tcp_no_delay functionality tcp_no_delay: true @@ -738,6 +753,9 @@ defaults: # @schema {"name": "defaults.grpc.client.max_send_msg_size", "type": "integer"} # defaults.grpc.client.call_option.max_send_msg_size -- gRPC client call option max send message size max_send_msg_size: 0 + # @schema {"name": "defaults.grpc.client.content_subtype", "type": "string"} + # defaults.grpc.client.call_option.content_subtype -- gRPC client call option content subtype + content_subtype: "" # @schema {"name": "defaults.grpc.client.dial_option", "type": "object"} dial_option: # @schema {"name": "defaults.grpc.client.dial_option.write_buffer_size", "type": "integer"} @@ -755,6 +773,12 @@ defaults: # @schema {"name": "defaults.grpc.client.dial_option.max_msg_size", "type": "integer"} # defaults.grpc.client.dial_option.max_msg_size -- gRPC client dial option max message size max_msg_size: 0 + # @schema {"name": "defaults.grpc.client.dial_option.max_header_list_size", "type": "integer"} + # defaults.grpc.client.dial_option.max_header_list_size -- gRPC client dial option max header list size + max_header_list_size: 0 + # @schema {"name": "defaults.grpc.client.dial_option.max_call_attempts", "type": "integer"} + # defaults.grpc.client.dial_option.max_call_attempts -- gRPC client dial option number of max call attempts + max_call_attempts: 0 # @schema {"name": "defaults.grpc.client.dial_option.backoff_max_delay", "type": "string"} # defaults.grpc.client.dial_option.backoff_max_delay -- gRPC client dial option max backoff delay backoff_max_delay: "120s" @@ -773,27 +797,45 @@ defaults: # @schema {"name": "defaults.grpc.client.dial_option.enable_backoff", "type": "boolean"} # defaults.grpc.client.dial_option.enable_backoff -- gRPC client dial option backoff enabled enable_backoff: false + # @schema {"name": "defaults.grpc.client.dial_option.disable_retry", "type": "boolean"} + # defaults.grpc.client.dial_option.disable_retry -- gRPC client dial option disables retry + disable_retry: false # @schema {"name": "defaults.grpc.client.dial_option.insecure", "type": "boolean"} # defaults.grpc.client.dial_option.insecure -- gRPC client dial option insecure enabled insecure: true + # @schema {"name": 
"defaults.grpc.client.dial_option.shared_write_buffer", "type": "boolean"} + # defaults.grpc.client.dial_option.shared_write_buffer -- gRPC client dial option sharing write buffer + shared_write_buffer: false + # @schema {"name": "defaults.grpc.client.dial_option.authority", "type": "string"} + # defaults.grpc.client.dial_option.authority -- gRPC client dial option authority + authority: "" + # @schema {"name": "defaults.grpc.client.dial_option.idle_timeout", "type": "string"} + # defaults.grpc.client.dial_option.idle_timeout -- gRPC client dial option idle_timeout + idle_timeout: "1h" + # @schema {"name": "defaults.grpc.client.dial_option.user_agent", "type": "string"} + # defaults.grpc.client.dial_option.user_agent -- gRPC client dial option user_agent + user_agent: "Vald-gRPC" # @schema {"name": "defaults.grpc.client.dial_option.timeout", "type": "string"} # defaults.grpc.client.dial_option.timeout -- gRPC client dial option timeout timeout: "" - # @schema {"name": "defaults.grpc.client.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor"]}} + # @schema {"name": "defaults.grpc.client.dial_option.interceptors", "type": "array", "items": {"type": "string", "enum": ["TraceInterceptor", "MetricInterceptor"]}} # defaults.grpc.client.dial_option.interceptors -- gRPC client interceptors interceptors: [] # @schema {"name": "defaults.grpc.client.dial_option.net", "type": "object", "anchor": "net"} net: + # @schema {"name": "defaults.grpc.client.dial_option.net.network", "type": "string", "enum": ["tcp", "udp", "unix"]} + # defaults.grpc.client.dial_option.net.network -- gRPC client dialer network type + network: tcp # @schema {"name": "defaults.grpc.client.dial_option.net.dns", "type": "object"} dns: # @schema {"name": "defaults.grpc.client.dial_option.net.dns.cache_enabled", "type": "boolean"} - # defaults.grpc.client.dial_option.net.dns.cache_enabled -- gRPC client TCP DNS cache enabled + # defaults.grpc.client.dial_option.net.dns.cache_enabled -- gRPC client DNS cache enabled cache_enabled: true # @schema {"name": "defaults.grpc.client.dial_option.net.dns.refresh_duration", "type": "string"} - # defaults.grpc.client.dial_option.net.dns.refresh_duration -- gRPC client TCP DNS cache refresh duration + # defaults.grpc.client.dial_option.net.dns.refresh_duration -- gRPC client DNS cache refresh duration refresh_duration: 30m # @schema {"name": "defaults.grpc.client.dial_option.net.dns.cache_expiration", "type": "string"} - # defaults.grpc.client.dial_option.net.dns.cache_expiration -- gRPC client TCP DNS cache expiration + # defaults.grpc.client.dial_option.net.dns.cache_expiration -- gRPC client DNS cache expiration cache_expiration: 1h # @schema {"name": "defaults.grpc.client.dial_option.net.dialer", "type": "object"} dialer: @@ -993,6 +1035,9 @@ gateway: # @schema {"name": "gateway.lb.maxUnavailable", "type": "string"} # gateway.lb.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: 50% + # @schema {"name": "gateway.lb.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # gateway.lb.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
+ unhealthyPodEvictionPolicy: "AlwaysAllow" # @schema {"name": "gateway.lb.revisionHistoryLimit", "type": "integer", "minimum": 0} # gateway.lb.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -1075,11 +1120,13 @@ gateway: name: wait-for-discoverer target: discoverer image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 - type: wait-for name: wait-for-agent target: agent image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "gateway.lb.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} # gateway.lb.env -- environment variables @@ -1275,6 +1322,9 @@ gateway: # @schema {"name": "gateway.filter.maxUnavailable", "type": "string"} # gateway.filter.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: 50% + # @schema {"name": "gateway.filter.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # gateway.filter.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. + unhealthyPodEvictionPolicy: "AlwaysAllow" # @schema {"name": "gateway.filter.revisionHistoryLimit", "type": "integer", "minimum": 0} # gateway.filter.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -1357,6 +1407,7 @@ gateway: name: wait-for-gateway-lb target: gateway-lb image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "gateway.filter.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} # gateway.filter.env -- environment variables @@ -1562,6 +1613,9 @@ gateway: # @schema {"name": "gateway.mirror.maxUnavailable", "type": "string"} # gateway.mirror.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: 50% + # @schema {"name": "gateway.mirror.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # gateway.mirror.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
+ unhealthyPodEvictionPolicy: "AlwaysAllow" # @schema {"name": "gateway.mirror.revisionHistoryLimit", "type": "integer", "minimum": 0} # gateway.mirror.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -1644,6 +1698,7 @@ gateway: name: wait-for-gateway-lb target: gateway-lb image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "gateway.mirror.env", "type": "array", "items": {"type": "object"}, "anchor": "env"} # gateway.mirror.env -- environment variables @@ -1772,12 +1827,14 @@ gateway: gateway_config: # @schema {"name": "gateway.mirror.gateway_config.net", "alias": "net"} net: + # gateway.mirror.gateway_config.net.network.cache_enabled -- gRPC client dialer network type + network: tcp dns: - # gateway.mirror.gateway_config.net.dns.cache_enabled -- TCP DNS cache enabled + # gateway.mirror.gateway_config.net.dns.cache_enabled -- DNS cache enabled cache_enabled: true - # gateway.mirror.gateway_config.net.dns.refresh_duration -- TCP DNS cache refresh duration + # gateway.mirror.gateway_config.net.dns.refresh_duration -- DNS cache refresh duration refresh_duration: 5m - # gateway.mirror.gateway_config.net.dns.cache_expiration -- TCP DNS cache expiration + # gateway.mirror.gateway_config.net.dns.cache_expiration -- DNS cache expiration cache_expiration: 24h dialer: # gateway.mirror.gateway_config.net.dialer.timeout -- TCP dialer timeout @@ -1912,6 +1969,9 @@ agent: # @schema {"name": "agent.maxUnavailable", "type": "string"} # agent.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: "1" + # @schema {"name": "agent.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # agent.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
+ unhealthyPodEvictionPolicy: "IfHealthyBudget" # @schema {"name": "agent.revisionHistoryLimit", "type": "integer", "minimum": 0} # agent.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -2587,12 +2647,14 @@ agent: client: # @schema {"name": "agent.sidecar.config.client.net", "alias": "net"} net: + # agent.sidecar.config.client.net.network -- gRPC client dialer network type + network: tcp dns: - # agent.sidecar.config.client.net.dns.cache_enabled -- HTTP client TCP DNS cache enabled + # agent.sidecar.config.client.net.dns.cache_enabled -- HTTP client DNS cache enabled cache_enabled: true - # agent.sidecar.config.client.net.dns.refresh_duration -- HTTP client TCP DNS cache refresh duration + # agent.sidecar.config.client.net.dns.refresh_duration -- HTTP client DNS cache refresh duration refresh_duration: 1h - # agent.sidecar.config.client.net.dns.refresh_duration -- HTTP client TCP DNS cache expiration + # agent.sidecar.config.client.net.dns.refresh_duration -- HTTP client DNS cache expiration cache_expiration: 24h dialer: # agent.sidecar.config.client.net.dialer.timeout -- HTTP client TCP dialer connect timeout @@ -2747,6 +2809,9 @@ discoverer: # @schema {"name": "discoverer.maxUnavailable", "type": "string"} # discoverer.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: 50% + # @schema {"name": "discoverer.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # discoverer.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. + unhealthyPodEvictionPolicy: "AlwaysAllow" # @schema {"name": "discoverer.revisionHistoryLimit", "type": "integer", "minimum": 0} # discoverer.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -2962,12 +3027,14 @@ discoverer: fields: {} # @schema {"name": "discoverer.discoverer.net", "alias": "net"} net: + # discoverer.discoverer.net.network -- gRPC client dialer network type + network: tcp dns: - # discoverer.discoverer.net.dns.cache_enabled -- TCP DNS cache enabled + # discoverer.discoverer.net.dns.cache_enabled -- DNS cache enabled cache_enabled: true - # discoverer.discoverer.net.dns.refresh_duration -- TCP DNS cache refresh duration + # discoverer.discoverer.net.dns.refresh_duration -- DNS cache refresh duration refresh_duration: 5m - # discoverer.discoverer.net.dns.cache_expiration -- TCP DNS cache expiration + # discoverer.discoverer.net.dns.cache_expiration -- DNS cache expiration cache_expiration: 24h dialer: # discoverer.discoverer.net.dialer.timeout -- TCP dialer timeout @@ -3068,6 +3135,9 @@ manager: # @schema {"name": "manager.index.maxUnavailable", "type": "string"} # manager.index.maxUnavailable -- maximum number of unavailable replicas maxUnavailable: 50% + # @schema {"name": "manager.index.unhealthyPodEvictionPolicy", "type": "string", "enum": ["AlwaysAllow", "IfHealthyBudget"]} + # manager.index.unhealthyPodEvictionPolicy -- controls whether unhealthy pods can be evicted based on the application's healthy pod count, supporting either cautious or permissive eviction. 
+ unhealthyPodEvictionPolicy: "AlwaysAllow" # @schema {"name": "manager.index.revisionHistoryLimit", "type": "integer", "minimum": 0} # manager.index.revisionHistoryLimit -- number of old history to retain to allow rollback revisionHistoryLimit: 2 @@ -3133,11 +3203,13 @@ manager: name: wait-for-agent target: agent image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 - type: wait-for name: wait-for-discoverer target: discoverer image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "manager.index.env", "alias": "env"} # manager.index.env -- environment variables @@ -3293,11 +3365,13 @@ manager: name: wait-for-agent target: agent image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 - type: wait-for name: wait-for-discoverer target: discoverer image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "manager.index.corrector.env", "alias": "env"} # manager.index.corrector.env -- environment variables @@ -3353,6 +3427,30 @@ manager: # @schema {"name": "manager.index.corrector.node_name", "type": "string"} # manager.index.corrector.node_name -- node name node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "manager.index.corrector.nodeSelector", "alias": "nodeSelector"} + # manager.index.corrector.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.index.corrector.tolerations", "alias": "tolerations"} + # manager.index.corrector.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.index.corrector.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.index.corrector.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.index.corrector.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.index.corrector.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.corrector.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.index.corrector.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.corrector.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] # @schema {"name": "manager.index.corrector.gateway", "alias": "grpc.client"} # manager.index.corrector.gateway -- gRPC client for gateway (overrides defaults.grpc.client) gateway: {} @@ -3403,11 +3501,13 @@ manager: name: wait-for-agent target: agent image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 - type: wait-for name: wait-for-discoverer target: discoverer image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "manager.index.creator.env", "alias": "env"} # manager.index.creator.env -- environment variables @@ -3463,6 +3563,30 @@ manager: # @schema {"name": "manager.index.creator.node_name", "type": "string"} # manager.index.creator.node_name -- 
node name node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "manager.index.creator.nodeSelector", "alias": "nodeSelector"} + # manager.index.creator.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.index.creator.tolerations", "alias": "tolerations"} + # manager.index.creator.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.index.creator.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.index.creator.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.index.creator.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.index.creator.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.creator.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.index.creator.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.creator.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] # @schema {"name": "manager.index.creator.discoverer", "type": "object"} discoverer: # @schema {"name": "manager.index.creator.discoverer.duration", "type": "string"} @@ -3510,11 +3634,13 @@ manager: name: wait-for-agent target: agent image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 - type: wait-for name: wait-for-discoverer target: discoverer image: busybox:stable + imagePullPolicy: Always sleepDuration: 2 # @schema {"name": "manager.index.saver.env", "alias": "env"} # manager.index.saver.env -- environment variables @@ -3567,6 +3693,30 @@ manager: # @schema {"name": "manager.index.saver.node_name", "type": "string"} # manager.index.saver.node_name -- node name node_name: "" # _MY_NODE_NAME_ + # @schema {"name": "manager.index.saver.nodeSelector", "alias": "nodeSelector"} + # manager.index.saver.nodeSelector -- node selector + nodeSelector: {} + # @schema {"name": "manager.index.saver.tolerations", "alias": "tolerations"} + # manager.index.saver.tolerations -- tolerations + tolerations: [] + # @schema {"name": "manager.index.saver.affinity", "alias": "affinity"} + affinity: + nodeAffinity: + # manager.index.saver.affinity.nodeAffinity.preferredDuringSchedulingIgnoredDuringExecution -- node affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: + # manager.index.saver.affinity.nodeAffinity.requiredDuringSchedulingIgnoredDuringExecution.nodeSelectorTerms -- node affinity required node selectors + nodeSelectorTerms: [] + podAffinity: + # manager.index.saver.affinity.podAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.saver.affinity.podAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod affinity 
required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + # manager.index.saver.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity preferred scheduling terms + preferredDuringSchedulingIgnoredDuringExecution: [] + # manager.index.saver.affinity.podAntiAffinity.requiredDuringSchedulingIgnoredDuringExecution -- pod anti-affinity required scheduling terms + requiredDuringSchedulingIgnoredDuringExecution: [] # @schema {"name": "manager.index.saver.discoverer", "type": "object"} discoverer: # @schema {"name": "manager.index.saver.discoverer.duration", "type": "string"} diff --git a/dockers/agent/core/agent/Dockerfile b/dockers/agent/core/agent/Dockerfile index 3146e03671..caf56e6e0e 100644 --- a/dockers/agent/core/agent/Dockerfile +++ b/dockers/agent/core/agent/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -36,7 +37,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=agent/core/agent ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV CARGO_HOME=${RUST_HOME}/cargo @@ -47,9 +48,10 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -62,14 +64,15 @@ RUN --mount=type=bind,target=.,rw \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ liblapack-dev \ libomp-dev \ libopenblas-dev \ gfortran \ + pkg-config \ && ldconfig \ && echo "${LANG} UTF-8" > /etc/locale.gen \ && ln -fs /usr/share/zoneinfo/${TZ} /etc/localtime \ diff --git a/dockers/agent/core/faiss/Dockerfile b/dockers/agent/core/faiss/Dockerfile index 3d0dd46f51..c369c9f429 100644 --- a/dockers/agent/core/faiss/Dockerfile +++ b/dockers/agent/core/faiss/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages 
"true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -64,10 +67,10 @@ RUN --mount=type=bind,target=.,rw \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ liblapack-dev \ libomp-dev \ libopenblas-dev \ diff --git a/dockers/agent/core/ngt/Dockerfile b/dockers/agent/core/ngt/Dockerfile index de2005fe87..8d4186b5af 100644 --- a/dockers/agent/core/ngt/Dockerfile +++ b/dockers/agent/core/ngt/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -64,10 +67,10 @@ RUN --mount=type=bind,target=.,rw \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ liblapack-dev \ libomp-dev \ libopenblas-dev \ diff --git a/dockers/agent/sidecar/Dockerfile b/dockers/agent/sidecar/Dockerfile index 15f68d9a04..5fdb77154c 100644 --- a/dockers/agent/sidecar/Dockerfile +++ b/dockers/agent/sidecar/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/binfmt/Dockerfile b/dockers/binfmt/Dockerfile index 25f6c07d29..0be93ba3d9 100644 --- a/dockers/binfmt/Dockerfile +++ b/dockers/binfmt/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# 
check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # diff --git a/dockers/buildbase/Dockerfile b/dockers/buildbase/Dockerfile index 85286ef951..5dde4958c7 100644 --- a/dockers/buildbase/Dockerfile +++ b/dockers/buildbase/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # diff --git a/dockers/buildkit/Dockerfile b/dockers/buildkit/Dockerfile index 06329957fa..43a5a5b0b3 100644 --- a/dockers/buildkit/Dockerfile +++ b/dockers/buildkit/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # diff --git a/dockers/buildkit/syft/scanner/Dockerfile b/dockers/buildkit/syft/scanner/Dockerfile index 94a2b2c13e..87be4558e9 100644 --- a/dockers/buildkit/syft/scanner/Dockerfile +++ b/dockers/buildkit/syft/scanner/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # diff --git a/dockers/ci/base/Dockerfile b/dockers/ci/base/Dockerfile index ba6c129c19..ebaf45a89f 100644 --- a/dockers/ci/base/Dockerfile +++ b/dockers/ci/base/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -41,7 +42,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=ci/base ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV CARGO_HOME=${RUST_HOME}/cargo @@ -52,11 +53,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -70,20 +73,22 @@ RUN --mount=type=bind,target=.,rw \ git \ npm \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ liblapack-dev \ libomp-dev \ libopenblas-dev \ gfortran \ + pkg-config \ + file \ gawk \ gnupg2 \ graphviz \ jq \ - libhdf5-dev \ libaec-dev \ + libhdf5-dev \ sed \ zip \ && ldconfig \ @@ -111,15 +116,15 @@ RUN --mount=type=bind,target=.,rw \ && make kind/install \ && make kubectl/install \ && make kubelinter/install \ - && make reviewdog/install \ - && make tparse/install \ - && make valdcli/install \ - && make yq/install \ && make minikube/install \ + && make reviewdog/install \ && make stern/install \ && make telepresence/install \ + && make tparse/install \ + && make yq/install \ && make ngt/install \ && make faiss/install \ + && make usearch/install \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 USER root:root diff --git a/dockers/dev/Dockerfile b/dockers/dev/Dockerfile index 
cdb863ac99..290b99c402 100644 --- a/dockers/dev/Dockerfile +++ b/dockers/dev/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -18,7 +19,7 @@ # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go ARG UPX_OPTIONS=-9 # skipcq: DOK-DL3026,DOK-DL3007 -FROM mcr.microsoft.com/devcontainers/base:ubuntu22.04 +FROM mcr.microsoft.com/devcontainers/base:ubuntu24.04 LABEL maintainer="vdaas.org vald team " # skipcq: DOK-DL3002 USER root:root @@ -41,7 +42,7 @@ ENV LC_ALL=en_US.UTF-8 ENV ORG=vdaas ENV PKG=dev ENV REPO=vald -ENV RUST_HOME=/usr/loacl/lib/rust +ENV RUST_HOME=/usr/local/lib/rust ENV TZ=Etc/UTC ENV USER=root ENV CARGO_HOME=${RUST_HOME}/cargo @@ -52,43 +53,42 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ && apt-get upgrade -y \ && apt-get install -y --no-install-recommends --fix-missing \ - curl \ - gnupg \ - software-properties-common \ - && add-apt-repository ppa:ubuntu-toolchain-r/test -y \ - && apt-get update -y \ - && apt-get upgrade -y \ - && apt-get install -y --no-install-recommends --fix-missing \ build-essential \ ca-certificates \ + curl \ + gnupg \ tzdata \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ liblapack-dev \ libomp-dev \ libopenblas-dev \ gfortran \ + pkg-config \ + file \ gawk \ gnupg2 \ graphviz \ jq \ - libhdf5-dev \ libaec-dev \ + libhdf5-dev \ sed \ zip \ && ldconfig \ @@ -131,13 +131,12 @@ RUN --mount=type=bind,target=.,rw \ && make kind/install \ && make kubectl/install \ && make kubelinter/install \ - && make reviewdog/install \ - && make tparse/install \ - && make valdcli/install \ - && make yq/install \ && make minikube/install \ + && make reviewdog/install \ && make stern/install \ && make telepresence/install \ + && make tparse/install \ + && make yq/install \ && make ngt/install \ && make faiss/install \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* diff --git a/dockers/discoverer/k8s/Dockerfile b/dockers/discoverer/k8s/Dockerfile index cf88c12f3e..090b7ddc7e 100644 --- a/dockers/discoverer/k8s/Dockerfile +++ b/dockers/discoverer/k8s/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - 
--mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/gateway/filter/Dockerfile b/dockers/gateway/filter/Dockerfile index b7de1a1100..cfdcee9020 100644 --- a/dockers/gateway/filter/Dockerfile +++ b/dockers/gateway/filter/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/gateway/lb/Dockerfile b/dockers/gateway/lb/Dockerfile index 734976d6b4..73478fa5ab 100644 --- a/dockers/gateway/lb/Dockerfile +++ b/dockers/gateway/lb/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 
'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/gateway/mirror/Dockerfile b/dockers/gateway/mirror/Dockerfile index 014c77b0da..2c9e11874e 100644 --- a/dockers/gateway/mirror/Dockerfile +++ b/dockers/gateway/mirror/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/index/job/correction/Dockerfile b/dockers/index/job/correction/Dockerfile index d4e416b36d..d43c312fb7 100644 --- a/dockers/index/job/correction/Dockerfile +++ b/dockers/index/job/correction/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/index/job/creation/Dockerfile b/dockers/index/job/creation/Dockerfile index d379448172..7b56415884 100644 --- a/dockers/index/job/creation/Dockerfile +++ b/dockers/index/job/creation/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - 
--mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/index/job/readreplica/rotate/Dockerfile b/dockers/index/job/readreplica/rotate/Dockerfile index ca059a76f8..41861347e0 100644 --- a/dockers/index/job/readreplica/rotate/Dockerfile +++ b/dockers/index/job/readreplica/rotate/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/index/job/save/Dockerfile b/dockers/index/job/save/Dockerfile index 6a8ea10829..76723d1f64 100644 --- a/dockers/index/job/save/Dockerfile +++ b/dockers/index/job/save/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 
'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/index/operator/Dockerfile b/dockers/index/operator/Dockerfile index c62067897b..4195fff32a 100644 --- a/dockers/index/operator/Dockerfile +++ b/dockers/index/operator/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/manager/index/Dockerfile b/dockers/manager/index/Dockerfile index 3e3f724b73..0212228172 100644 --- a/dockers/manager/index/Dockerfile +++ b/dockers/manager/index/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/operator/helm/Dockerfile b/dockers/operator/helm/Dockerfile index a0f9711a81..b40530fc31 100644 --- a/dockers/operator/helm/Dockerfile +++ b/dockers/operator/helm/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -51,11 +52,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + 
--mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/tools/benchmark/job/Dockerfile b/dockers/tools/benchmark/job/Dockerfile index 7f2aa43e34..260f95034e 100644 --- a/dockers/tools/benchmark/job/Dockerfile +++ b/dockers/tools/benchmark/job/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -64,10 +67,10 @@ RUN --mount=type=bind,target=.,rw \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ libhdf5-dev \ libaec-dev \ && ldconfig \ diff --git a/dockers/tools/benchmark/operator/Dockerfile b/dockers/tools/benchmark/operator/Dockerfile index 64fe1ea007..966ccf289f 100644 --- a/dockers/tools/benchmark/operator/Dockerfile +++ b/dockers/tools/benchmark/operator/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ 
+ set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ diff --git a/dockers/tools/cli/loadtest/Dockerfile b/dockers/tools/cli/loadtest/Dockerfile index 2e90c328aa..e972744e1c 100644 --- a/dockers/tools/cli/loadtest/Dockerfile +++ b/dockers/tools/cli/loadtest/Dockerfile @@ -1,4 +1,5 @@ # syntax = docker/dockerfile:latest +# check=error=true # # Copyright (C) 2019-2025 vdaas.org vald team # @@ -47,11 +48,13 @@ SHELL ["/bin/bash", "-o", "pipefail", "-c"] #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008 RUN --mount=type=bind,target=.,rw \ --mount=type=tmpfs,target=/tmp \ - --mount=type=cache,target=/var/lib/apt,sharing=locked \ - --mount=type=cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME} \ + --mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME} \ --mount=type=cache,target="${GOPATH}/pkg",id="go-build-${TARGETARCH}" \ - --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}"\ - echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ + --mount=type=cache,target="${HOME}/.cache/go-build",id="go-build-${TARGETARCH}" \ + --mount=type=tmpfs,target="${GOPATH}/src" \ + set -ex \ + && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \ && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \ && apt-get clean \ && apt-get update -y \ @@ -64,10 +67,10 @@ RUN --mount=type=bind,target=.,rw \ locales \ git \ cmake \ - gcc \ g++ \ - unzip \ + gcc \ libssl-dev \ + unzip \ libhdf5-dev \ libaec-dev \ && ldconfig \ diff --git a/docs/api/insert.md b/docs/api/insert.md index ef9e2dd43f..eae2bd7dce 100644 --- a/docs/api/insert.md +++ b/docs/api/insert.md @@ -100,7 +100,7 @@ Inset RPC is the method to add a new single vector. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -226,7 +226,7 @@ It's the recommended method to insert a large number of vectors. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -349,7 +349,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/api/object.md b/docs/api/object.md index ca4b0e89c2..a9e37ea9d0 100644 --- a/docs/api/object.md +++ b/docs/api/object.md @@ -67,7 +67,7 @@ Exists RPC is the method to check that a vector exists in the `vald-agent`. | 5 | NOT_FOUND | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -148,7 +148,7 @@ GetObject RPC is the method to get the metadata of a vector inserted into the `v | 5 | NOT_FOUND | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. 
### Troubleshooting @@ -242,7 +242,7 @@ Each Upsert request and response are independent. | 5 | NOT_FOUND | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/api/remove.md b/docs/api/remove.md index 156176fc70..2846ea08ee 100644 --- a/docs/api/remove.md +++ b/docs/api/remove.md @@ -101,7 +101,7 @@ Remove RPC is the method to remove a single vector. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -222,7 +222,7 @@ RemoveByTimestamp RPC is the method to remove vectors based on timestamp. | 5 | NOT_FOUND | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -344,7 +344,7 @@ It's the recommended method to remove a large number of vectors. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -465,7 +465,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/api/search.md b/docs/api/search.md index dab76e0cec..ccec2863d0 100644 --- a/docs/api/search.md +++ b/docs/api/search.md @@ -146,7 +146,7 @@ Search RPC is the method to search vector(s) similar to the request vector. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -267,7 +267,7 @@ The vector with the same requested ID should be indexed into the `vald-agent` be | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -403,7 +403,7 @@ Each Search request and response are independent. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -539,7 +539,7 @@ Each SearchByID request and response are independent. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -684,7 +684,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -830,7 +830,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. 
### Troubleshooting @@ -946,7 +946,7 @@ LinearSearch RPC is the method to linear search vector(s) similar to the request | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -1064,7 +1064,7 @@ You will get a `NOT_FOUND` error if the vector isn't stored. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -1196,7 +1196,7 @@ Each LinearSearch request and response are independent. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -1328,7 +1328,7 @@ Each LinearSearchByID request and response are independent. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -1469,7 +1469,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -1611,7 +1611,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/api/update.md b/docs/api/update.md index f1517b2a64..2fb68b05f7 100644 --- a/docs/api/update.md +++ b/docs/api/update.md @@ -102,7 +102,7 @@ Update RPC is the method to update a single vector. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -231,7 +231,7 @@ It's the recommended method to update the large amount of vectors. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -357,7 +357,7 @@ Please be careful that the size of the request exceeds the limit. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/api/upsert.md b/docs/api/upsert.md index bd3128419d..9ed9f6572c 100644 --- a/docs/api/upsert.md +++ b/docs/api/upsert.md @@ -105,7 +105,7 @@ Upsert RPC is the method to update the inserted vector to a new single vector or | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -232,7 +232,7 @@ It’s the recommended method to upsert a large number of vectors. | 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting @@ -356,7 +356,7 @@ Please be careful that the size of the request exceeds the limit. 
| 10 | ABORTED | | 13 | INTERNAL | -Please refer to [Response Status Code](./status.md) for more details. +Please refer to [Response Status Code](../status.md) for more details. ### Troubleshooting diff --git a/docs/contributing/coding-style.md b/docs/contributing/coding-style.md index ffd67cef7c..c5bb690079 100644 --- a/docs/contributing/coding-style.md +++ b/docs/contributing/coding-style.md @@ -1407,4 +1407,4 @@ Since each package has its purpose, we decided to apply different strategies to For the rest of the `./pkg` packages, we decided to implement the unit test for the exported function only. -Please follow the [unit test guideline](./unit-test-guideline.md) for more details on how to implement good unit test. +Please follow the [unit test guideline](../unit-test-guideline.md) for more details on how to implement good unit test. diff --git a/docs/contributing/unit-test-guideline.md b/docs/contributing/unit-test-guideline.md index 833e65addf..41341bd704 100644 --- a/docs/contributing/unit-test-guideline.md +++ b/docs/contributing/unit-test-guideline.md @@ -128,7 +128,7 @@ You have to create unit tests for error patterns as the same as success patterns #### Advanced -##### Robust boudary test +##### Robust boundary test The previous section is about the basic test cases. The (robust) boundary test should be applied to cover more test coverage. diff --git a/docs/overview/about-vald.md b/docs/overview/about-vald.md index dc1fb00b39..f954bb35ba 100644 --- a/docs/overview/about-vald.md +++ b/docs/overview/about-vald.md @@ -57,7 +57,7 @@ Vald is easy to use, feature-rich and highly customizable as you needed. - You can configure the number of vector dimensions, the number of replica and etc. - Multi language supported - - Go, Java, Clojure, Node.js, and Python client library are supported. + - Go, Java, Node.js, and Python client library are supported. - gRPC APIs can be triggered by any programming languages which support gRPC. - REST API is also supported. diff --git a/docs/performance/continuous-benchmark.md b/docs/performance/continuous-benchmark.md index ef683b1d5d..b63e12d316 100644 --- a/docs/performance/continuous-benchmark.md +++ b/docs/performance/continuous-benchmark.md @@ -45,24 +45,24 @@ And, Benchmark Operator also applies it to the Kubernetes cluster based on `Vald **main properties** -| Name | mandatory | Description | type | sample | -| :------------------------- | :-------- | :-------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------- | :------------------------------------------------------------------------------------------- | -| target | \* | target Vald cluster | object | ref: [target](#target-prop) | -| dataset | \* | dataset information | object | ref: [dataset](#dataset-prop) | -| job_type | \* | execute job type | string enum: [insert, update, upsert, remove, search, getobject, exists] | search | -| repetition | | the number of job repetitions
default: `1` | integer | 1 | -| replica | | the number of job concurrent job executions
default: `1` | integer | 2 | -| rps | | designed request per sec to the target cluster
default: `1000` | integer | 1000 | -| concurrency_limit | | goroutine count limit for rps adjustment
default: `200` | integer | 20 | -| ttl_seconds_after_finished | | time until deletion of Pod after job end
default: `600` | integer | 120 | -| insert_config | | request config for insert job | object | ref: [config](#insert-cfg-props) | -| update_config | | request config for update job | object | ref: [config](#update-cfg-props) | -| upsert_config | | request config for upsert job | object | ref: [config](#upsert-cfg-props) | -| search_config | | request config for search job | object | ref: [config](#search-cfg-props) | -| remove_config | | request config for remove job | object | ref: [config](#remove-cfg-props) | -| object_config | | request config for object job | object | ref: [config](#object-cfg-props) | -| client_config | | gRPC client config for running benchmark job
Tune if can not getting the expected performance with default config. | object | ref: [defaults.grpc](https://github.com/vdaas/vald/blob/main/charts/vald/README.md) | -| server_config | | server config for benchmark job pod
Tune if can not getting the expected performance with default config. | object | ref: [defaults.server_config](https://github.com/vdaas/vald/blob/main/charts/vald/README.md) | +| Name | mandatory | Description | type | sample | +| :------------------------- | :-------- | :-------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------- | :--------------------------------------------------------------------------------------------- | +| target | \* | target Vald cluster | object | ref: [target](#target-prop) | +| dataset | \* | dataset information | object | ref: [dataset](#dataset-prop) | +| job_type | \* | execute job type | string enum: [insert, update, upsert, remove, search, getobject, exists] | search | +| repetition | | the number of job repetitions
default: `1` | integer | 1 | +| replica | | the number of job concurrent job executions
default: `1` | integer | 2 | +| rps | | designed request per sec to the target cluster
default: `1000` | integer | 1000 | +| concurrency_limit | | goroutine count limit for rps adjustment
default: `200` | integer | 20 | +| ttl_seconds_after_finished | | time until deletion of Pod after job end
default: `600` | integer | 120 | +| insert_config | | request config for insert job | object | ref: [config](#insert-cfg-props) | +| update_config | | request config for update job | object | ref: [config](#update-cfg-props) | +| upsert_config | | request config for upsert job | object | ref: [config](#upsert-cfg-props) | +| search_config | | request config for search job | object | ref: [config](#search-cfg-props) | +| remove_config | | request config for remove job | object | ref: [config](#remove-cfg-props) | +| object_config | | request config for object job | object | ref: [config](#object-cfg-props) | +| client_config | | gRPC client config for running benchmark job
Tune if can not getting the expected performance with default config. | object | ref: [defaults.grpc](https://github.com/vdaas/vald/blob/main/charts/vald/values.yaml) | +| server_config | | server config for benchmark job pod
Tune if can not getting the expected performance with default config. | object | ref: [defaults.server_config](https://github.com/vdaas/vald/blob/main/charts/vald/values.yaml) | diff --git a/docs/troubleshooting/client-side.md b/docs/troubleshooting/client-side.md index 6efc5f1ff3..a487c81383 100644 --- a/docs/troubleshooting/client-side.md +++ b/docs/troubleshooting/client-side.md @@ -55,4 +55,4 @@ Please check your CPU information. - [Provisioning Troubleshooting](../troubleshooting/provisioning.md) - [API Status](../api/status.md) -- [FAQ](../support/FAQ.md) +- [FAQ](/docs/support/faq) diff --git a/docs/tutorial/get-started-with-faiss-agent.md b/docs/tutorial/get-started-with-faiss-agent.md index 3b8dec1ad8..e47e3b1659 100644 --- a/docs/tutorial/get-started-with-faiss-agent.md +++ b/docs/tutorial/get-started-with-faiss-agent.md @@ -1,6 +1,6 @@ # Get Started -This tutorial is for those who have already completed [Get Started](https://github.com/vdaas/vald/blob/main/docs/tutorial/get-started.md). +This tutorial is for those who have already completed [Get Started](../tutorial/get-started.md). Please refer to Prepare the Kubernetes Cluster and others there. ## Deploy Vald on Kubernetes Cluster @@ -278,7 +278,7 @@ If you are interested, please refer to [SDKs](../user-guides/sdks.md).
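The benchmark job properties above include `rps` and `concurrency_limit`, which pace outgoing requests and cap in-flight goroutines respectively. The sketch below only illustrates how those two knobs interact in a generic load generator; it is not the benchmark operator's implementation, `doRequest` is a hypothetical stand-in for a single benchmark RPC, and it assumes golang.org/x/time/rate:

```go
// Illustration only: how an rps target and a concurrency_limit can interact in a
// load generator. This is NOT the benchmark operator's implementation; doRequest
// is a hypothetical stand-in for a single benchmark RPC.
package main

import (
	"context"
	"log"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

// doRequest simulates one benchmark request (hypothetical helper).
func doRequest(ctx context.Context, i int) error {
	time.Sleep(time.Millisecond)
	return nil
}

func main() {
	const (
		rps              = 1000 // corresponds to the rps default above
		concurrencyLimit = 200  // corresponds to the concurrency_limit default above
		total            = 5000
	)
	ctx := context.Background()
	limiter := rate.NewLimiter(rate.Limit(rps), 1) // pace issuance to ~rps per second
	sem := make(chan struct{}, concurrencyLimit)   // cap in-flight goroutines
	var wg sync.WaitGroup
	for i := 0; i < total; i++ {
		if err := limiter.Wait(ctx); err != nil {
			log.Fatal(err)
		}
		sem <- struct{}{}
		wg.Add(1)
		go func(i int) {
			defer wg.Done()
			defer func() { <-sem }()
			if err := doRequest(ctx, i); err != nil {
				log.Println(err)
			}
		}(i)
	}
	wg.Wait()
}
```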
```go ctx := context.Background() - conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithInsecure()) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithInsecure()) if err != nil { glg.Fatal(err) } diff --git a/docs/tutorial/get-started.md b/docs/tutorial/get-started.md index 5f14077cff..203379f444 100644 --- a/docs/tutorial/get-started.md +++ b/docs/tutorial/get-started.md @@ -333,7 +333,7 @@ If you are interested, please refer to [SDKs](../user-guides/sdks.md).
```go ctx := context.Background() - conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { glg.Fatal(err) } diff --git a/docs/tutorial/vald-agent-standalone-on-k8s.md b/docs/tutorial/vald-agent-standalone-on-k8s.md index 669e03f285..13238002f4 100644 --- a/docs/tutorial/vald-agent-standalone-on-k8s.md +++ b/docs/tutorial/vald-agent-standalone-on-k8s.md @@ -236,7 +236,7 @@ This chapter uses [NGT](https://github.com/yahoojapan/ngt) as Vald Agent to perf ```go ctx := context.Background() - conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { glg.Fatal(err) } diff --git a/docs/tutorial/vald-multicluster-on-k8s.md b/docs/tutorial/vald-multicluster-on-k8s.md index 6cdd344bc6..15d1498a16 100644 --- a/docs/tutorial/vald-multicluster-on-k8s.md +++ b/docs/tutorial/vald-multicluster-on-k8s.md @@ -88,7 +88,7 @@ In this section, you will deploy three Vald clusters consisting of `vald-agent-n git clone https://github.com/vdaas/vald.git && cd vald ``` -2. Deploy on the `vald-01` Namespace using [dev-vald-01.yaml](https://github.com/vdaas/vald/blob/feature/mirror-gateway-definition/charts/vald/values/multi-vald/dev-vald-01.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) +2. Deploy on the `vald-01` Namespace using [dev-vald-01.yaml](https://github.com/vdaas/vald/blob/main/charts/vald/values/multi-vald/dev-vald-01.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) ```bash helm install vald-cluster-01 charts/vald \ @@ -97,7 +97,7 @@ In this section, you will deploy three Vald clusters consisting of `vald-agent-n -n vald-01 ``` -3. Deploy on the `vald-02` Namespace using [dev-vald-02.yaml](https://github.com/vdaas/vald/blob/feature/mirror-gateway-definition/charts/vald/values/multi-vald/dev-vald-02.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) +3. Deploy on the `vald-02` Namespace using [dev-vald-02.yaml](https://github.com/vdaas/vald/blob/main/charts/vald/values/multi-vald/dev-vald-02.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) ```bash helm install vald-cluster-02 charts/vald \ @@ -106,7 +106,7 @@ In this section, you will deploy three Vald clusters consisting of `vald-agent-n -n vald-02 ``` -4. Deploy on the `vald-03` Namespace using [dev-vald-03.yaml](https://github.com/vdaas/vald/blob/feature/mirror-gateway-definition/charts/vald/values/multi-vald/dev-vald-03.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) +4. Deploy on the `vald-03` Namespace using [dev-vald-03.yaml](https://github.com/vdaas/vald/blob/main/charts/vald/values/multi-vald/dev-vald-03.yaml) and [values.yaml](https://github.com/vdaas/vald/blob/main/example/helm/values.yaml) ```bash helm install vald-cluster-03 charts/vald \ @@ -194,7 +194,7 @@ It requires applying the `ValdMirrorTarget` Custom Resource to the one Namespace When applied successfully, the destination information is automatically created on other Namespaces when interconnected with each `vald-mirror-gateway`. 
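Several tutorial and example hunks in this patch replace `grpc.DialContext` with `grpc.NewClient`. A minimal sketch of the resulting client setup, assuming grpc-go v1.63 or later (where `NewClient` was introduced) and the vald-client-go v1 `vald`/`payload` packages already used by these examples; the address and ID below are placeholders:

```go
// Minimal sketch of the post-migration client setup (assumptions noted above).
package main

import (
	"context"
	"log"

	"github.com/vdaas/vald-client-go/v1/payload"
	"github.com/vdaas/vald-client-go/v1/vald"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	ctx := context.Background()
	// Unlike DialContext, NewClient takes no context and does not dial eagerly;
	// the connection is established lazily on the first RPC.
	conn, err := grpc.NewClient("localhost:8081", grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vald.NewValdClient(conn)
	if _, err := client.Exists(ctx, &payload.Object_ID{Id: "example-id"}); err != nil {
		// NOT_FOUND is expected for an unknown ID; other codes suggest a connectivity issue.
		log.Println(err)
	}
}
```

The hunks that still pass `grpc.WithInsecure()` could equally use `grpc.WithTransportCredentials(insecure.NewCredentials())`, as the other examples in this patch already do.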
-This tutorial will deploy the [ValdMirrorTarget](https://github.com/vdaas/vald/tree/main/charts/vald/values/mirror-target.yaml) Custom Resource to the `vald-03` Namespace with the following command. +This tutorial will deploy the [ValdMirrorTarget](https://github.com/vdaas/vald/blob/main/charts/vald/values/multi-vald/mirror-target.yaml) Custom Resource to the `vald-03` Namespace with the following command. ```bash kubectl apply -f ./charts/vald/values/multi-vald/mirror-target.yaml -n vald-03 @@ -255,7 +255,7 @@ If you are interested, please refer to [SDKs](https://vald.vdaas.org/docs/user- 3. Run Example - We use [example/client/mirror/main.go](https://github.com/vdaas/vald/blob/feature/mirror-gateway-example/example/client/mirror/main.go) to run the example. + We use [example/client/mirror/main.go](https://github.com/vdaas/vald/blob/main/example/client/mirror/main.go) to run the example. This example will insert and index 400 vectors into the Vald cluster from the Fashion-MNIST dataset via [gRPC](https://grpc.io/). And then, after waiting for indexing, it will request to search the nearest vector 10 times to all Vald clusters. You will get the 10 nearest neighbor vectors for each search query. diff --git a/docs/user-guides/client-api-config.md b/docs/user-guides/client-api-config.md index 578910b77f..3abac4d34d 100644 --- a/docs/user-guides/client-api-config.md +++ b/docs/user-guides/client-api-config.md @@ -46,7 +46,7 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() target := "localhost:8080" - conn, err := grpc.DialContext(ctx, target) + conn, err := grpc.NewClient(target) if err != nil { panic(err) } @@ -162,7 +162,7 @@ func example() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() target := "localhost:8080" - conn, err := grpc.DialContext(ctx, target) + conn, err := grpc.NewClient(target) if err != nil { panic(err) } @@ -286,7 +286,7 @@ func example() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() target := "localhost:8080" - conn, err := grpc.DialContext(ctx, target) + conn, err := grpc.NewClient(target) if err != nil { panic(err) } @@ -470,7 +470,7 @@ func main() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() target := "localhost:8080" - conn, err := grpc.DialContext(ctx, target) + conn, err := grpc.NewClient(target) if err != nil { panic(err) } @@ -653,7 +653,7 @@ func example() { ctx, cancel := context.WithCancel(context.Background()) defer cancel() target := "localhost:8080" - conn, err := grpc.DialContext(ctx, target) + conn, err := grpc.NewClient(target) if err != nil { panic(err) } diff --git a/docs/user-guides/cluster-role-binding.md b/docs/user-guides/cluster-role-binding.md index 36404ec157..af66e6f6b3 100644 --- a/docs/user-guides/cluster-role-binding.md +++ b/docs/user-guides/cluster-role-binding.md @@ -141,7 +141,7 @@ In this section, we will describe how to configure it and how to customize these ### Cluster role configuration for Vald Mirror Gateway -By looking at the [cluster role configuration](https://github.com/vdaas/vald/blob/main/k8s/gatewat/mirror/clusterrole.yaml), the access right of the following resources are granted to the cluster role `gateway-mirror`. +By looking at the [cluster role configuration](https://github.com/vdaas/vald/blob/main/k8s/gateway/mirror/clusterrole.yaml), the access right of the following resources are granted to the cluster role `gateway-mirror`. 
```yaml apiVersion: rbac.authorization.k8s.io/v1 diff --git a/docs/user-guides/filtering-configuration.md b/docs/user-guides/filtering-configuration.md index b2d864b093..05f7673bfc 100644 --- a/docs/user-guides/filtering-configuration.md +++ b/docs/user-guides/filtering-configuration.md @@ -153,7 +153,7 @@ func main() { // connect to the Vald cluster ctx := context.Background() - conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithInsecure()) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithInsecure()) if err != nil { log.Error(err) return diff --git a/docs/user-guides/observability-configuration.md b/docs/user-guides/observability-configuration.md index 677781f191..db965a9417 100644 --- a/docs/user-guides/observability-configuration.md +++ b/docs/user-guides/observability-configuration.md @@ -176,7 +176,7 @@ defaults: #### Specify the Telemetry attribute -You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attirbute`. +You can add the component information to the attribute of telemetry data by editing `defaults.observability.otlp.attribute`. E.g., when setting `vald-agent-ngt-0` as `agent.observability.otlp.attribute.pod_name`, `target_pod: vald-agent-ngt-0` will be added to the attribute. These attributes are set auto by the environment values, so Vald recommends using default values unless there is a specific reason. diff --git a/docs/user-guides/sdks.md b/docs/user-guides/sdks.md index 66b2dfb7fc..28ec3abdc7 100644 --- a/docs/user-guides/sdks.md +++ b/docs/user-guides/sdks.md @@ -16,7 +16,6 @@ Here is the list of Official SDKs. - [Java](https://github.com/vdaas/vald-client-java) - [Python](https://github.com/vdaas/vald-client-python) - [Node.js](https://github.com/vdaas/vald-client-node) -- [Clojure](https://github.com/vdaas/vald-client-clj) ## How to use SDKs diff --git a/example/client/agent/main.go b/example/client/agent/main.go index 9b89775877..96682f13d2 100644 --- a/example/client/agent/main.go +++ b/example/client/agent/main.go @@ -67,7 +67,7 @@ func main() { ctx := context.Background() // Create a Vald Agent client for connecting to the Vald cluster. 
- conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { glg.Fatal(err) } diff --git a/example/client/go.mod b/example/client/go.mod index 977d0df4ce..ba1c798b47 100644 --- a/example/client/go.mod +++ b/example/client/go.mod @@ -1,42 +1,42 @@ module github.com/vdaas/vald/example/client -go 1.23.0 +go 1.23.4 replace ( github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/goccy/go-json => github.com/goccy/go-json v0.10.3 github.com/golang/protobuf => github.com/golang/protobuf v1.5.4 github.com/kpango/glg => github.com/kpango/glg v1.6.15 - github.com/pkg/sftp => github.com/pkg/sftp v1.13.6 - golang.org/x/crypto => golang.org/x/crypto v0.26.0 - golang.org/x/net => golang.org/x/net v0.28.0 - golang.org/x/text => golang.org/x/text v0.17.0 - google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 - google.golang.org/grpc => google.golang.org/grpc v1.65.0 - google.golang.org/protobuf => google.golang.org/protobuf v1.34.2 - gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0 + github.com/pkg/sftp => github.com/pkg/sftp v1.13.7 + golang.org/x/crypto => golang.org/x/crypto v0.31.0 + golang.org/x/net => golang.org/x/net v0.33.0 + golang.org/x/text => golang.org/x/text v0.21.0 + google.golang.org/genproto => google.golang.org/genproto v0.0.0-20241216192217-9240e9c98484 + google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 + google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 + google.golang.org/grpc => google.golang.org/grpc v1.69.2 + google.golang.org/protobuf => google.golang.org/protobuf v1.36.0 gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1 + sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.4.0 ) require ( github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 github.com/kpango/glg v1.6.14 - github.com/vdaas/vald-client-go v1.7.13 + github.com/vdaas/vald-client-go v1.7.15 gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 - google.golang.org/grpc v1.64.1 + google.golang.org/grpc v1.67.1 ) require ( - buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2 // indirect + buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.35.2-20241127180247-a33202765966.1 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/kpango/fastime v1.1.9 // indirect - github.com/planetscale/vtprotobuf v0.6.0 // indirect - golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240528184218-531527333157 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf // indirect - google.golang.org/protobuf v1.34.2 // indirect + github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect + golang.org/x/net v0.30.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/text v0.21.0 // indirect + google.golang.org/genproto/googleapis/api 
v0.0.0-20241015192408-796eee8c2d53 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576 // indirect + google.golang.org/protobuf v1.36.0 // indirect ) diff --git a/example/client/go.mod.default b/example/client/go.mod.default index 127dcf1c62..774e7a9a4d 100644 --- a/example/client/go.mod.default +++ b/example/client/go.mod.default @@ -1,6 +1,6 @@ module github.com/vdaas/vald/example/client -go 1.23.0 +go 1.23.4 replace ( github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate latest @@ -16,6 +16,6 @@ replace ( google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc latest google.golang.org/grpc => google.golang.org/grpc latest google.golang.org/protobuf => google.golang.org/protobuf latest - gopkg.in/yaml.v2 => gopkg.in/yaml.v2 latest + sigs.k8s.io/yaml => sigs.k8s.io/yaml latest gopkg.in/yaml.v3 => gopkg.in/yaml.v3 latest ) diff --git a/example/client/go.sum b/example/client/go.sum index ddee44829b..f84abd9a75 100644 --- a/example/client/go.sum +++ b/example/client/go.sum @@ -1,40 +1,58 @@ -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2 h1:SZRVx928rbYZ6hEKUIN+vtGDkl7uotABRWGY4OAg5gM= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2/go.mod h1:ylS4c28ACSI59oJrOdW4pHS4n0Hw4TgSPHn8rpHl4Yw= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.35.2-20241127180247-a33202765966.1 h1:jLd96rDDNJ+zIJxvV/L855VEtrjR0G4aePVDlCpf6kw= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.35.2-20241127180247-a33202765966.1/go.mod h1:mnHCFccv4HwuIAOHNGdiIc5ZYbBCvbTWZcodLN5wITI= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/kpango/fastime v1.1.9 h1:xVQHcqyPt5M69DyFH7g1EPRns1YQNap9d5eLhl/Jy84= github.com/kpango/fastime v1.1.9/go.mod h1:vyD7FnUn08zxY4b/QFBZVG+9EWMYsNl+QF0uE46urD4= github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 h1:rxyM+7uaZQ35P9fbixdnld/h4AgEhODoubuy6A4nDdk= github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1/go.mod h1:CAYeq6us9NfnRkSz67/xKVIR6/vaY5ZQZRe6IVcaIKg= github.com/kpango/glg v1.6.15 h1:nw0xSxpSyrDIWHeb3dvnE08PW+SCbK+aYFETT75IeLA= github.com/kpango/glg v1.6.15/go.mod h1:cmsc7Yeu8AS3wHLmN7bhwENXOpxfq+QoqxCIk2FneRk= -github.com/planetscale/vtprotobuf v0.6.0 h1:nBeETjudeJ5ZgBHUz1fVHvbqUKnYOXNhsIEabROxmNA= -github.com/planetscale/vtprotobuf v0.6.0/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 
h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= +github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0= github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/vdaas/vald-client-go v1.7.13 h1:WGhy3buxn5ECgySfxd/t8ZCooF6UfZuwy6kqfKoFP7c= -github.com/vdaas/vald-client-go v1.7.13/go.mod h1:bH7XUMU/JnfRiv06V8J6Pahcjve4DUyX7wbw2lFvM64= +github.com/vdaas/vald-client-go v1.7.15 h1:uOUlmRh7aJx2nyT+9Iv28uNzqdc9xlXyWbN5nLNUALM= +github.com/vdaas/vald-client-go v1.7.15/go.mod h1:5PYD1Cf1UqgevuzofZNIEtcXatjQSaXwmn8xHvY74jA= +go.opentelemetry.io/otel v1.31.0 h1:NsJcKPIW0D0H3NgzPDHmo0WW6SptzPdqg/L1zsIm2hY= +go.opentelemetry.io/otel v1.31.0/go.mod h1:O0C14Yl9FgkjqcCZAsE053C13OaddMYr/hz6clDkEJE= +go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE= +go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY= +go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk= +go.opentelemetry.io/otel/sdk v1.31.0/go.mod h1:TfRbMdhvxIIr/B2N2LQW2S5v9m3gOQ/08KsbbO5BPT0= +go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4JjxTeYusH7zMc= +go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= +go.opentelemetry.io/otel/trace v1.31.0 h1:ffjsj1aRouKewfr85U2aGagJ46+MvodynlQ1HYdmJys= +go.opentelemetry.io/otel/trace v1.31.0/go.mod h1:TXZkRk7SM2ZQLtR6eoAWQFIHPvzQ06FJAsO1tJg480A= go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= go.uber.org/zap v1.24.0/go.mod h1:2kMP+WWQ8aoFoedH3T2sq6iJ2yDWpHbP0f6MQbS9Gkg= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 h1:vJpL69PeUullhJyKtTjHjENEmZU3BkO4e+fod7nKzgM= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946/go.mod h1:BQUWDHIAygjdt1HnUPQ0eWqLN2n5FwJycrpYUVUOx2I= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/rpc 
v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= +google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= diff --git a/example/client/main.go b/example/client/main.go index 7f56675711..29746d6065 100644 --- a/example/client/main.go +++ b/example/client/main.go @@ -66,7 +66,7 @@ func main() { ctx := context.Background() // Create a Vald client for connecting to the Vald cluster. - conn, err := grpc.DialContext(ctx, grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(grpcServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { glg.Fatal(err) } diff --git a/example/client/mirror/main.go b/example/client/mirror/main.go index 45ace4ae5c..20aeb5412f 100644 --- a/example/client/mirror/main.go +++ b/example/client/mirror/main.go @@ -68,7 +68,7 @@ func main() { // Creates Vald clients for connecting to Vald clusters. clients := make([]vald.Client, 0, len(grpcServerAddrs)) for _, addr := range grpcServerAddrs { - conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure()) + conn, err := grpc.NewClient(addr, grpc.WithInsecure()) if err != nil { glg.Fatal(err) } diff --git a/example/helm/values.yaml b/example/helm/values.yaml index eb538745ae..f4b4160624 100644 --- a/example/helm/values.yaml +++ b/example/helm/values.yaml @@ -62,7 +62,7 @@ agent: # The number of dimensions for feature vector of fashion-mnist dataset. dimension: 784 # We use L2-Norm for distance_type. - distance_type: cos + distance_type: l2 # The type of fashion-mnist's feature vectors. object_type: float # Check duration of automatic indexing. 
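The example/helm/values.yaml hunk above switches `distance_type` from `cos` to `l2`, matching the comment that L2-Norm is used. For concreteness, a tiny sketch of the L2 (Euclidean) distance that setting selects, using made-up 3-dimensional vectors (the fashion-mnist example actually uses 784 dimensions):

```go
// Sketch: the Euclidean (L2-norm) distance selected by distance_type: l2,
// shown for two toy 3-dimensional vectors. Illustrative only; the agent
// computes distances internally.
package main

import (
	"fmt"
	"math"
)

// l2Distance assumes a and b have the same length.
func l2Distance(a, b []float32) float64 {
	var sum float64
	for i := range a {
		d := float64(a[i] - b[i])
		sum += d * d
	}
	return math.Sqrt(sum)
}

func main() {
	a := []float32{0.1, 0.2, 0.3}
	b := []float32{0.1, 0.0, 0.7}
	fmt.Printf("L2 distance: %.4f\n", l2Distance(a, b))
}
```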
diff --git a/go.mod b/go.mod index 3e63824046..dde3bc3dea 100644 --- a/go.mod +++ b/go.mod @@ -1,31 +1,31 @@ module github.com/vdaas/vald -go 1.23.0 +go 1.23.4 replace ( - cloud.google.com/go => cloud.google.com/go v0.115.1 - cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.62.0 - cloud.google.com/go/compute => cloud.google.com/go/compute v1.27.5 - cloud.google.com/go/datastore => cloud.google.com/go/datastore v1.17.1 - cloud.google.com/go/firestore => cloud.google.com/go/firestore v1.16.0 - cloud.google.com/go/iam => cloud.google.com/go/iam v1.1.13 - cloud.google.com/go/kms => cloud.google.com/go/kms v1.18.5 - cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.20.4 - cloud.google.com/go/pubsub => cloud.google.com/go/pubsub v1.41.0 - cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.13.6 - cloud.google.com/go/storage => cloud.google.com/go/storage v1.43.0 - cloud.google.com/go/trace => cloud.google.com/go/trace v1.10.12 - code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.1.0 + cloud.google.com/go => cloud.google.com/go v0.117.0 + cloud.google.com/go/bigquery => cloud.google.com/go/bigquery v1.65.0 + cloud.google.com/go/compute => cloud.google.com/go/compute v1.31.0 + cloud.google.com/go/datastore => cloud.google.com/go/datastore v1.20.0 + cloud.google.com/go/firestore => cloud.google.com/go/firestore v1.17.0 + cloud.google.com/go/iam => cloud.google.com/go/iam v1.3.0 + cloud.google.com/go/kms => cloud.google.com/go/kms v1.20.3 + cloud.google.com/go/monitoring => cloud.google.com/go/monitoring v1.22.0 + cloud.google.com/go/pubsub => cloud.google.com/go/pubsub v1.45.3 + cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager v1.14.2 + cloud.google.com/go/storage => cloud.google.com/go/storage v1.47.0 // https://github.com/googleapis/google-cloud-go/issues/11283 + cloud.google.com/go/trace => cloud.google.com/go/trace v1.11.2 + code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt v0.22.0 contrib.go.opencensus.io/exporter/aws => contrib.go.opencensus.io/exporter/aws v0.0.0-20230502192102-15967c811cec contrib.go.opencensus.io/exporter/prometheus => contrib.go.opencensus.io/exporter/prometheus v0.4.2 contrib.go.opencensus.io/integrations/ocsql => contrib.go.opencensus.io/integrations/ocsql v0.1.7 - git.sr.ht/~sbinet/gg => git.sr.ht/~sbinet/gg v0.5.0 + git.sr.ht/~sbinet/gg => git.sr.ht/~sbinet/gg v0.6.0 github.com/Azure/azure-amqp-common-go/v3 => github.com/Azure/azure-amqp-common-go/v3 v3.2.3 github.com/Azure/azure-sdk-for-go => github.com/Azure/azure-sdk-for-go v68.0.0+incompatible - github.com/Azure/azure-sdk-for-go/sdk/azcore => github.com/Azure/azure-sdk-for-go/sdk/azcore v1.14.0 - github.com/Azure/azure-sdk-for-go/sdk/azidentity => github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.7.0 + github.com/Azure/azure-sdk-for-go/sdk/azcore => github.com/Azure/azure-sdk-for-go/sdk/azcore v1.16.0 + github.com/Azure/azure-sdk-for-go/sdk/azidentity => github.com/Azure/azure-sdk-for-go/sdk/azidentity v1.8.0 github.com/Azure/azure-sdk-for-go/sdk/internal => github.com/Azure/azure-sdk-for-go/sdk/internal v1.10.0 - github.com/Azure/go-amqp => github.com/Azure/go-amqp v1.0.5 + github.com/Azure/go-amqp => github.com/Azure/go-amqp v1.3.0 github.com/Azure/go-autorest => github.com/Azure/go-autorest v14.2.1-0.20240530140449-f7ea664c9cff+incompatible github.com/Azure/go-autorest/autorest => github.com/Azure/go-autorest/autorest v0.11.30-0.20240530140449-f7ea664c9cff github.com/Azure/go-autorest/autorest/adal => 
github.com/Azure/go-autorest/autorest/adal v0.9.24 @@ -36,37 +36,37 @@ replace ( github.com/Azure/go-autorest/tracing => github.com/Azure/go-autorest/tracing v0.6.1-0.20240530140449-f7ea664c9cff github.com/BurntSushi/toml => github.com/BurntSushi/toml v1.4.0 github.com/DATA-DOG/go-sqlmock => github.com/DATA-DOG/go-sqlmock v1.5.2 - github.com/GoogleCloudPlatform/cloudsql-proxy => github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.0 - github.com/Masterminds/semver/v3 => github.com/Masterminds/semver/v3 v3.2.1 - github.com/ajstarks/deck => github.com/ajstarks/deck v0.0.0-20240814155529-0478e0c25be8 - github.com/ajstarks/deck/generate => github.com/ajstarks/deck/generate v0.0.0-20240814155529-0478e0c25be8 + github.com/GoogleCloudPlatform/cloudsql-proxy => github.com/GoogleCloudPlatform/cloudsql-proxy v1.37.3 + github.com/Masterminds/semver/v3 => github.com/Masterminds/semver/v3 v3.3.1 + github.com/ajstarks/deck => github.com/ajstarks/deck v0.0.0-20240918141114-8d365813662d + github.com/ajstarks/deck/generate => github.com/ajstarks/deck/generate v0.0.0-20240918141114-8d365813662d github.com/ajstarks/svgo => github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b github.com/akrylysov/pogreb => github.com/akrylysov/pogreb v0.10.2 github.com/antihax/optional => github.com/antihax/optional v1.0.0 github.com/armon/go-socks5 => github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 github.com/aws/aws-sdk-go => github.com/aws/aws-sdk-go v1.55.5 - github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v1.30.4 - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream => github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 - github.com/aws/aws-sdk-go-v2/config => github.com/aws/aws-sdk-go-v2/config v1.27.28 - github.com/aws/aws-sdk-go-v2/credentials => github.com/aws/aws-sdk-go-v2/credentials v1.17.28 - github.com/aws/aws-sdk-go-v2/feature/ec2/imds => github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 - github.com/aws/aws-sdk-go-v2/feature/s3/manager => github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 - github.com/aws/aws-sdk-go-v2/internal/configsources => github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 + github.com/aws/aws-sdk-go-v2 => github.com/aws/aws-sdk-go-v2 v1.32.6 + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream => github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 + github.com/aws/aws-sdk-go-v2/config => github.com/aws/aws-sdk-go-v2/config v1.28.6 + github.com/aws/aws-sdk-go-v2/credentials => github.com/aws/aws-sdk-go-v2/credentials v1.17.47 + github.com/aws/aws-sdk-go-v2/feature/ec2/imds => github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 + github.com/aws/aws-sdk-go-v2/feature/s3/manager => github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 + github.com/aws/aws-sdk-go-v2/internal/configsources => github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 => github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 github.com/aws/aws-sdk-go-v2/internal/ini => github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding => github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 - github.com/aws/aws-sdk-go-v2/service/internal/checksum => github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => 
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 - github.com/aws/aws-sdk-go-v2/service/internal/s3shared => github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 - github.com/aws/aws-sdk-go-v2/service/kms => github.com/aws/aws-sdk-go-v2/service/kms v1.35.4 - github.com/aws/aws-sdk-go-v2/service/s3 => github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 - github.com/aws/aws-sdk-go-v2/service/secretsmanager => github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.32.5 - github.com/aws/aws-sdk-go-v2/service/sns => github.com/aws/aws-sdk-go-v2/service/sns v1.31.4 - github.com/aws/aws-sdk-go-v2/service/sqs => github.com/aws/aws-sdk-go-v2/service/sqs v1.34.4 - github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.52.5 - github.com/aws/aws-sdk-go-v2/service/sso => github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 - github.com/aws/aws-sdk-go-v2/service/sts => github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 - github.com/aws/smithy-go => github.com/aws/smithy-go v1.20.4 + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding => github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 + github.com/aws/aws-sdk-go-v2/service/internal/checksum => github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url => github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 + github.com/aws/aws-sdk-go-v2/service/internal/s3shared => github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 + github.com/aws/aws-sdk-go-v2/service/kms => github.com/aws/aws-sdk-go-v2/service/kms v1.37.7 + github.com/aws/aws-sdk-go-v2/service/s3 => github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 + github.com/aws/aws-sdk-go-v2/service/secretsmanager => github.com/aws/aws-sdk-go-v2/service/secretsmanager v1.34.7 + github.com/aws/aws-sdk-go-v2/service/sns => github.com/aws/aws-sdk-go-v2/service/sns v1.33.7 + github.com/aws/aws-sdk-go-v2/service/sqs => github.com/aws/aws-sdk-go-v2/service/sqs v1.37.2 + github.com/aws/aws-sdk-go-v2/service/ssm => github.com/aws/aws-sdk-go-v2/service/ssm v1.56.1 + github.com/aws/aws-sdk-go-v2/service/sso => github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 + github.com/aws/aws-sdk-go-v2/service/sts => github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 + github.com/aws/smithy-go => github.com/aws/smithy-go v1.22.1 github.com/benbjohnson/clock => github.com/benbjohnson/clock v1.3.5 github.com/beorn7/perks => github.com/beorn7/perks v1.0.1 github.com/bmizerany/assert => github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 @@ -79,11 +79,11 @@ replace ( github.com/chzyer/readline => github.com/chzyer/readline v1.5.1 github.com/chzyer/test => github.com/chzyer/test v1.0.0 github.com/cncf/udpa/go => github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe - github.com/cncf/xds/go => github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20 + github.com/cncf/xds/go => github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57 github.com/cockroachdb/apd => github.com/cockroachdb/apd v1.1.0 github.com/coreos/go-systemd/v22 => github.com/coreos/go-systemd/v22 v22.5.0 - github.com/cpuguy83/go-md2man/v2 => github.com/cpuguy83/go-md2man/v2 v2.0.4 - github.com/creack/pty => github.com/creack/pty v1.1.23 + github.com/cpuguy83/go-md2man/v2 => github.com/cpuguy83/go-md2man/v2 v2.0.6 + github.com/creack/pty => github.com/creack/pty v1.1.24 github.com/davecgh/go-spew => github.com/davecgh/go-spew v1.1.1 github.com/denisenkom/go-mssqldb => 
github.com/denisenkom/go-mssqldb v0.12.3 github.com/devigned/tab => github.com/devigned/tab v0.1.1 @@ -92,13 +92,13 @@ replace ( github.com/docopt/docopt-go => github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 github.com/dustin/go-humanize => github.com/dustin/go-humanize v1.0.1 github.com/emicklei/go-restful/v3 => github.com/emicklei/go-restful/v3 v3.12.1 - github.com/envoyproxy/go-control-plane => github.com/envoyproxy/go-control-plane v0.13.0 + github.com/envoyproxy/go-control-plane => github.com/envoyproxy/go-control-plane v0.13.1 github.com/envoyproxy/protoc-gen-validate => github.com/envoyproxy/protoc-gen-validate v1.1.0 github.com/evanphx/json-patch => github.com/evanphx/json-patch v0.5.2 github.com/fogleman/gg => github.com/fogleman/gg v1.3.0 github.com/fortytw2/leaktest => github.com/fortytw2/leaktest v1.3.0 github.com/frankban/quicktest => github.com/frankban/quicktest v1.14.6 - github.com/fsnotify/fsnotify => github.com/fsnotify/fsnotify v1.7.0 + github.com/fsnotify/fsnotify => github.com/fsnotify/fsnotify v1.8.0 github.com/gin-contrib/sse => github.com/gin-contrib/sse v0.1.0 github.com/gin-gonic/gin => github.com/gin-gonic/gin v1.10.0 github.com/go-errors/errors => github.com/go-errors/errors v1.5.1 @@ -121,7 +121,7 @@ replace ( github.com/go-playground/assert/v2 => github.com/go-playground/assert/v2 v2.2.0 github.com/go-playground/locales => github.com/go-playground/locales v0.14.1 github.com/go-playground/universal-translator => github.com/go-playground/universal-translator v0.18.1 - github.com/go-playground/validator/v10 => github.com/go-playground/validator/v10 v10.22.0 + github.com/go-playground/validator/v10 => github.com/go-playground/validator/v10 v10.23.0 github.com/go-redis/redis/v8 => github.com/go-redis/redis/v8 v8.11.5 github.com/go-sql-driver/mysql => github.com/go-sql-driver/mysql v1.8.1 github.com/go-task/slim-sprig => github.com/go-task/slim-sprig v2.20.0+incompatible @@ -131,22 +131,22 @@ replace ( github.com/gobwas/httphead => github.com/gobwas/httphead v0.1.0 github.com/gobwas/pool => github.com/gobwas/pool v0.2.1 github.com/gobwas/ws => github.com/gobwas/ws v1.4.0 - github.com/goccy/go-json => github.com/goccy/go-json v0.10.3 - github.com/gocql/gocql => github.com/gocql/gocql v1.6.0 - github.com/gocraft/dbr/v2 => github.com/gocraft/dbr/v2 v2.7.6 + github.com/goccy/go-json => github.com/goccy/go-json v0.10.3 // https://github.com/goccy/go-json/issues/529 + github.com/gocql/gocql => github.com/gocql/gocql v1.7.0 + github.com/gocraft/dbr/v2 => github.com/gocraft/dbr/v2 v2.7.7 github.com/godbus/dbus/v5 => github.com/godbus/dbus/v5 v5.1.0 github.com/gofrs/uuid => github.com/gofrs/uuid v4.4.0+incompatible github.com/gogo/protobuf => github.com/gogo/protobuf v1.3.2 - github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang-jwt/jwt/v4 => github.com/golang-jwt/jwt/v4 v4.5.1 github.com/golang-sql/civil => github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 github.com/golang-sql/sqlexp => github.com/golang-sql/sqlexp v0.1.0 github.com/golang/freetype => github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 - github.com/golang/glog => github.com/golang/glog v1.2.2 - github.com/golang/groupcache => github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da + github.com/golang/glog => github.com/golang/glog v1.2.3 + github.com/golang/groupcache => github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 github.com/golang/mock => github.com/golang/mock v1.6.0 github.com/golang/protobuf => 
github.com/golang/protobuf v1.5.4 github.com/golang/snappy => github.com/golang/snappy v0.0.4 - github.com/google/btree => github.com/google/btree v1.1.2 + github.com/google/btree => github.com/google/btree v1.1.3 github.com/google/gnostic => github.com/google/gnostic v0.7.0 github.com/google/go-cmp => github.com/google/go-cmp v0.6.0 github.com/google/go-replayers/grpcreplay => github.com/google/go-replayers/grpcreplay v1.3.0 @@ -154,23 +154,23 @@ replace ( github.com/google/gofuzz => github.com/google/gofuzz v1.2.0 github.com/google/martian => github.com/google/martian v2.1.0+incompatible github.com/google/martian/v3 => github.com/google/martian/v3 v3.3.3 - github.com/google/pprof => github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 + github.com/google/pprof => github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad github.com/google/shlex => github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 github.com/google/subcommands => github.com/google/subcommands v1.2.0 github.com/google/uuid => github.com/google/uuid v1.6.0 github.com/google/wire => github.com/google/wire v0.6.0 - github.com/googleapis/gax-go/v2 => github.com/googleapis/gax-go/v2 v2.13.0 + github.com/googleapis/gax-go/v2 => github.com/googleapis/gax-go/v2 v2.14.0 github.com/gorilla/mux => github.com/gorilla/mux v1.8.1 github.com/gorilla/websocket => github.com/gorilla/websocket v1.5.3 github.com/grafana/pyroscope-go/godeltaprof => github.com/grafana/pyroscope-go/godeltaprof v0.1.8 github.com/gregjones/httpcache => github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 - github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 + github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 github.com/hailocab/go-hostpool => github.com/kpango/go-hostpool v0.0.0-20210303030322-aab80263dcd0 - github.com/hanwen/go-fuse/v2 => github.com/hanwen/go-fuse/v2 v2.5.1 + github.com/hanwen/go-fuse/v2 => github.com/hanwen/go-fuse/v2 v2.7.2 github.com/hashicorp/go-uuid => github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version => github.com/hashicorp/go-version v1.7.0 github.com/iancoleman/strcase => github.com/iancoleman/strcase v0.3.0 - github.com/ianlancetaylor/demangle => github.com/ianlancetaylor/demangle v0.0.0-20240805132620-81f5be970eca + github.com/ianlancetaylor/demangle => github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd github.com/inconshreveable/mousetrap => github.com/inconshreveable/mousetrap v1.1.0 github.com/jackc/chunkreader/v2 => github.com/jackc/chunkreader/v2 v2.0.1 github.com/jackc/pgconn => github.com/jackc/pgconn v1.14.3 @@ -179,7 +179,7 @@ replace ( github.com/jackc/pgpassfile => github.com/jackc/pgpassfile v1.0.0 github.com/jackc/pgproto3/v2 => github.com/jackc/pgproto3/v2 v2.3.3 github.com/jackc/pgservicefile => github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 - github.com/jackc/pgtype => github.com/jackc/pgtype v1.14.3 + github.com/jackc/pgtype => github.com/jackc/pgtype v1.14.4 github.com/jackc/pgx/v4 => github.com/jackc/pgx/v4 v4.18.3 github.com/jackc/puddle => github.com/jackc/puddle v1.3.0 github.com/jessevdk/go-flags => github.com/jessevdk/go-flags v1.6.1 @@ -190,13 +190,13 @@ replace ( github.com/josharian/intern => github.com/josharian/intern v1.0.0 github.com/json-iterator/go => github.com/json-iterator/go v1.1.12 github.com/jstemmer/go-junit-report => github.com/jstemmer/go-junit-report v1.0.0 - github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.7.0 
+ github.com/kisielk/errcheck => github.com/kisielk/errcheck v1.8.0 github.com/kisielk/gotool => github.com/kisielk/gotool v1.0.0 - github.com/klauspost/compress => github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b - github.com/klauspost/cpuid/v2 => github.com/klauspost/cpuid/v2 v2.2.8 + github.com/klauspost/compress => github.com/klauspost/compress v1.17.12-0.20241216125714-bbaf27d0c3d9 + github.com/klauspost/cpuid/v2 => github.com/klauspost/cpuid/v2 v2.2.9 github.com/kpango/fastime => github.com/kpango/fastime v1.1.9 github.com/kpango/fuid => github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 - github.com/kpango/gache/v2 => github.com/kpango/gache/v2 v2.0.9 + github.com/kpango/gache/v2 => github.com/kpango/gache/v2 v2.1.1 github.com/kpango/glg => github.com/kpango/glg v1.6.15 github.com/kr/fs => github.com/kr/fs v0.1.0 github.com/kr/pretty => github.com/kr/pretty v0.3.1 @@ -208,10 +208,10 @@ replace ( github.com/lib/pq => github.com/lib/pq v1.10.9 github.com/liggitt/tabwriter => github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de github.com/lucasb-eyer/go-colorful => github.com/lucasb-eyer/go-colorful v1.2.0 - github.com/mailru/easyjson => github.com/mailru/easyjson v0.7.7 + github.com/mailru/easyjson => github.com/mailru/easyjson v0.9.0 github.com/mattn/go-colorable => github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-isatty => github.com/mattn/go-isatty v0.0.20 - github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.14.22 + github.com/mattn/go-sqlite3 => github.com/mattn/go-sqlite3 v1.14.24 github.com/matttproud/golang_protobuf_extensions => github.com/matttproud/golang_protobuf_extensions v1.0.4 github.com/mitchellh/colorstring => github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db github.com/moby/spdystream => github.com/moby/spdystream v0.5.0 @@ -225,8 +225,8 @@ replace ( github.com/niemeyer/pretty => github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e github.com/nxadm/tail => github.com/nxadm/tail v1.4.11 github.com/onsi/ginkgo => github.com/onsi/ginkgo v1.16.5 - github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.20.0 - github.com/onsi/gomega => github.com/onsi/gomega v1.34.1 + github.com/onsi/ginkgo/v2 => github.com/onsi/ginkgo/v2 v2.22.0 + github.com/onsi/gomega => github.com/onsi/gomega v1.36.1 github.com/peterbourgon/diskv => github.com/peterbourgon/diskv v2.0.1+incompatible github.com/phpdave11/gofpdf => github.com/phpdave11/gofpdf v1.4.2 github.com/phpdave11/gofpdi => github.com/phpdave11/gofpdi v1.0.13 @@ -234,12 +234,12 @@ replace ( github.com/pierrec/lz4/v3 => github.com/pierrec/lz4/v3 v3.3.5 github.com/pkg/browser => github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/pkg/errors => github.com/pkg/errors v0.9.1 - github.com/pkg/sftp => github.com/pkg/sftp v1.13.6 + github.com/pkg/sftp => github.com/pkg/sftp v1.13.7 github.com/pmezard/go-difflib => github.com/pmezard/go-difflib v1.0.0 github.com/prashantv/gostub => github.com/prashantv/gostub v1.1.0 - github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.20.0 + github.com/prometheus/client_golang => github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model => github.com/prometheus/client_model v0.6.1 - github.com/prometheus/common => github.com/prometheus/common v0.55.0 + github.com/prometheus/common => github.com/prometheus/common v0.61.0 github.com/prometheus/procfs => github.com/prometheus/procfs v0.15.1 github.com/prometheus/prometheus => github.com/prometheus/prometheus 
v1.99.0 github.com/quasilyte/go-ruleguard => github.com/quasilyte/go-ruleguard v0.4.2 @@ -247,8 +247,8 @@ replace ( github.com/quasilyte/gogrep => github.com/quasilyte/gogrep v0.5.0 github.com/quasilyte/stdinfo => github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 github.com/rogpeppe/fastuuid => github.com/rogpeppe/fastuuid v1.2.0 - github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.12.0 - github.com/rs/xid => github.com/rs/xid v1.5.0 + github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.13.1 + github.com/rs/xid => github.com/rs/xid v1.6.0 github.com/rs/zerolog => github.com/rs/zerolog v1.33.0 github.com/russross/blackfriday/v2 => github.com/russross/blackfriday/v2 v2.1.0 github.com/ruudk/golang-pdf417 => github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245 @@ -265,7 +265,7 @@ replace ( github.com/spf13/pflag => github.com/spf13/pflag v1.0.5 github.com/stoewer/go-strcase => github.com/stoewer/go-strcase v1.3.0 github.com/stretchr/objx => github.com/stretchr/objx v0.5.2 - github.com/stretchr/testify => github.com/stretchr/testify v1.9.0 + github.com/stretchr/testify => github.com/stretchr/testify v1.10.0 github.com/ugorji/go/codec => github.com/ugorji/go/codec v1.2.12 github.com/xeipuuv/gojsonpointer => github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb github.com/xeipuuv/gojsonreference => github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 @@ -273,55 +273,55 @@ replace ( github.com/xlab/treeprint => github.com/xlab/treeprint v1.2.0 github.com/zeebo/assert => github.com/zeebo/assert v1.3.1 github.com/zeebo/xxh3 => github.com/zeebo/xxh3 v1.0.2 - go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.10 + go.etcd.io/bbolt => go.etcd.io/bbolt v1.3.11 go.opencensus.io => go.opencensus.io v0.24.0 - go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 - go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.28.0 + go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 + go.opentelemetry.io/otel => go.opentelemetry.io/otel v1.33.0 go.opentelemetry.io/otel/exporters/otlp/internal/retry => go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.17.0 go.opentelemetry.io/otel/exporters/otlp/otlpmetric => go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.43.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace => go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 - go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.28.0 - go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.28.0 - go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.28.0 - go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.28.0 - go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v1.3.1 - go.starlark.net => go.starlark.net v0.0.0-20240725214946-42030a7cedce + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc => go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace => 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc => go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0
+ go.opentelemetry.io/otel/metric => go.opentelemetry.io/otel/metric v1.33.0
+ go.opentelemetry.io/otel/sdk => go.opentelemetry.io/otel/sdk v1.33.0
+ go.opentelemetry.io/otel/sdk/metric => go.opentelemetry.io/otel/sdk/metric v1.33.0
+ go.opentelemetry.io/otel/trace => go.opentelemetry.io/otel/trace v1.33.0
+ go.opentelemetry.io/proto/otlp => go.opentelemetry.io/proto/otlp v1.4.0
+ go.starlark.net => go.starlark.net v0.0.0-20241125201518-c05ff208a98f
go.uber.org/atomic => go.uber.org/atomic v1.11.0
- go.uber.org/automaxprocs => go.uber.org/automaxprocs v1.5.3
+ go.uber.org/automaxprocs => go.uber.org/automaxprocs v1.6.0
go.uber.org/goleak => go.uber.org/goleak v1.3.0
go.uber.org/multierr => go.uber.org/multierr v1.11.0
go.uber.org/zap => go.uber.org/zap v1.27.0
- gocloud.dev => gocloud.dev v0.39.0
- golang.org/x/crypto => golang.org/x/crypto v0.26.0
- golang.org/x/exp => golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa
- golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa
- golang.org/x/image => golang.org/x/image v0.19.0
- golang.org/x/lint => golang.org/x/lint v0.0.0-20210508222113-6edffad5e616
- golang.org/x/mobile => golang.org/x/mobile v0.0.0-20240806205939-81131f6468ab
- golang.org/x/mod => golang.org/x/mod v0.20.0
- golang.org/x/net => golang.org/x/net v0.28.0
- golang.org/x/oauth2 => golang.org/x/oauth2 v0.22.0
- golang.org/x/sync => golang.org/x/sync v0.8.0
- golang.org/x/sys => golang.org/x/sys v0.24.0
- golang.org/x/term => golang.org/x/term v0.23.0
- golang.org/x/text => golang.org/x/text v0.17.0
- golang.org/x/time => golang.org/x/time v0.6.0
- golang.org/x/tools => golang.org/x/tools v0.24.0
- golang.org/x/xerrors => golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9
+ gocloud.dev => gocloud.dev v0.40.0
+ golang.org/x/crypto => golang.org/x/crypto v0.31.0
+ golang.org/x/exp => golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67
+ golang.org/x/exp/typeparams => golang.org/x/exp/typeparams v0.0.0-20241217172543-b2144cdd0a67
+ golang.org/x/image => golang.org/x/image v0.23.0
+ golang.org/x/lint => golang.org/x/lint v0.0.0-20241112194109-818c5a804067
+ golang.org/x/mobile => golang.org/x/mobile v0.0.0-20241213221354-a87c1cf6cf46
+ golang.org/x/mod => golang.org/x/mod v0.22.0
+ golang.org/x/net => golang.org/x/net v0.33.0
+ golang.org/x/oauth2 => golang.org/x/oauth2 v0.24.0
+ golang.org/x/sync => golang.org/x/sync v0.10.0
+ golang.org/x/sys => golang.org/x/sys v0.28.0
+ golang.org/x/term => golang.org/x/term v0.27.0
+ golang.org/x/text => golang.org/x/text v0.21.0
+ golang.org/x/time => golang.org/x/time v0.8.0
+ golang.org/x/tools => golang.org/x/tools v0.28.0
+ golang.org/x/xerrors => golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da
gomodules.xyz/jsonpatch/v2 => gomodules.xyz/jsonpatch/v2 v2.4.0
gonum.org/v1/gonum => gonum.org/v1/gonum v0.15.1
gonum.org/v1/hdf5 => gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946
- gonum.org/v1/plot => gonum.org/v1/plot v0.14.0
- google.golang.org/api => google.golang.org/api v0.192.0
+ gonum.org/v1/plot => gonum.org/v1/plot v0.15.0
+ google.golang.org/api => google.golang.org/api v0.213.0
google.golang.org/appengine => google.golang.org/appengine v1.6.8
- google.golang.org/genproto => google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142
- google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
- google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142
- google.golang.org/grpc => google.golang.org/grpc v1.65.0
+ google.golang.org/genproto => google.golang.org/genproto v0.0.0-20241216192217-9240e9c98484
+ google.golang.org/genproto/googleapis/api => google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484
+ google.golang.org/genproto/googleapis/rpc => google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484
+ google.golang.org/grpc => google.golang.org/grpc v1.69.2
google.golang.org/grpc/cmd/protoc-gen-go-grpc => google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
- google.golang.org/protobuf => google.golang.org/protobuf v1.34.2
+ google.golang.org/protobuf => google.golang.org/protobuf v1.36.0
gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
gopkg.in/inconshreveable/log15.v2 => gopkg.in/inconshreveable/log15.v2 v2.16.0
gopkg.in/inf.v0 => gopkg.in/inf.v0 v0.9.1
@@ -329,32 +329,32 @@ replace (
gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
honnef.co/go/tools => honnef.co/go/tools v0.5.1
- k8s.io/api => k8s.io/api v0.30.3
- k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.3
- k8s.io/apimachinery => k8s.io/apimachinery v0.30.3
- k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.3
- k8s.io/client-go => k8s.io/client-go v0.30.3
- k8s.io/component-base => k8s.io/component-base v0.30.3
+ k8s.io/api => k8s.io/api v0.32.0
+ k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.0
+ k8s.io/apimachinery => k8s.io/apimachinery v0.32.0
+ k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.0
+ k8s.io/client-go => k8s.io/client-go v0.32.0
+ k8s.io/component-base => k8s.io/component-base v0.32.0
k8s.io/klog/v2 => k8s.io/klog/v2 v2.130.1
- k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34
- k8s.io/kubernetes => k8s.io/kubernetes v0.30.3
- k8s.io/metrics => k8s.io/metrics v0.30.3
+ k8s.io/kube-openapi => k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7
+ k8s.io/kubernetes => k8s.io/kubernetes v0.32.0
+ k8s.io/metrics => k8s.io/metrics v0.32.0
nhooyr.io/websocket => nhooyr.io/websocket v1.8.17
rsc.io/pdf => rsc.io/pdf v0.1.1
- sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.18.4
- sigs.k8s.io/json => sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd
+ sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.3
+ sigs.k8s.io/json => sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8
sigs.k8s.io/kustomize => sigs.k8s.io/kustomize v2.0.3+incompatible
- sigs.k8s.io/structured-merge-diff/v4 => sigs.k8s.io/structured-merge-diff/v4 v4.4.1
+ sigs.k8s.io/structured-merge-diff/v4 => sigs.k8s.io/structured-merge-diff/v4 v4.5.0
sigs.k8s.io/yaml => sigs.k8s.io/yaml v1.4.0
)
require (
- buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2
+ buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.0-20241127180247-a33202765966.1
cloud.google.com/go/storage v1.43.0
code.cloudfoundry.org/bytefmt v0.0.0-20190710193110-1eb035ffe2b6
github.com/akrylysov/pogreb v0.0.0-00010101000000-000000000000
github.com/aws/aws-sdk-go v1.55.5
- github.com/felixge/fgprof v0.9.4
+ github.com/felixge/fgprof v0.9.5
github.com/fsnotify/fsnotify v1.7.0
github.com/go-redis/redis/v8 v8.0.0-00010101000000-000000000000
github.com/go-sql-driver/mysql v1.8.1
@@ -378,78 +378,91 @@ require (
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
github.com/quasilyte/go-ruleguard v0.0.0-00010101000000-000000000000
github.com/quasilyte/go-ruleguard/dsl v0.3.22
+ github.com/quic-go/quic-go v0.48.2
github.com/scylladb/gocqlx v0.0.0-00010101000000-000000000000
- github.com/stretchr/testify v1.9.0
+ github.com/stretchr/testify v1.10.0
+ github.com/unum-cloud/usearch/golang v0.0.0-20241213232841-3964f8392443
github.com/zeebo/xxh3 v1.0.2
- go.etcd.io/bbolt v1.3.8
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0
- go.opentelemetry.io/otel v1.28.0
+ go.etcd.io/bbolt v1.3.11
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0
+ go.opentelemetry.io/otel v1.33.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.0.0-00010101000000-000000000000
- go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0
- go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.19.0
- go.opentelemetry.io/otel/metric v1.28.0
- go.opentelemetry.io/otel/sdk v1.28.0
- go.opentelemetry.io/otel/sdk/metric v1.28.0
- go.opentelemetry.io/otel/trace v1.28.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0
+ go.opentelemetry.io/otel/metric v1.33.0
+ go.opentelemetry.io/otel/sdk v1.33.0
+ go.opentelemetry.io/otel/sdk/metric v1.33.0
+ go.opentelemetry.io/otel/trace v1.33.0
go.uber.org/automaxprocs v0.0.0-00010101000000-000000000000
go.uber.org/goleak v1.3.0
go.uber.org/ratelimit v0.3.1
go.uber.org/zap v1.27.0
gocloud.dev v0.0.0-00010101000000-000000000000
- golang.org/x/net v0.28.0
- golang.org/x/oauth2 v0.22.0
- golang.org/x/sync v0.8.0
- golang.org/x/sys v0.24.0
- golang.org/x/text v0.17.0
- golang.org/x/time v0.6.0
- golang.org/x/tools v0.24.0
+ golang.org/x/net v0.32.0
+ golang.org/x/oauth2 v0.24.0
+ golang.org/x/sync v0.10.0
+ golang.org/x/sys v0.28.0
+ golang.org/x/text v0.21.0
+ golang.org/x/time v0.8.0
+ golang.org/x/tools v0.28.0
gonum.org/v1/hdf5 v0.0.0-00010101000000-000000000000
gonum.org/v1/plot v0.14.0
- google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142
- google.golang.org/grpc v1.65.0
- google.golang.org/protobuf v1.34.2
+ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20241209162323-e6fa225c2576
+ google.golang.org/grpc v1.68.1
+ google.golang.org/protobuf v1.36.0
gopkg.in/yaml.v2 v2.4.0
- k8s.io/api v0.30.3
- k8s.io/apimachinery v0.30.3
+ k8s.io/api v0.32.0
+ k8s.io/apimachinery v0.32.0
k8s.io/cli-runtime v0.0.0-00010101000000-000000000000
- k8s.io/client-go v0.30.3
+ k8s.io/client-go v0.32.0
k8s.io/metrics v0.0.0-00010101000000-000000000000
- k8s.io/utils v0.0.0-20240711033017-18e509b52bc8
+ k8s.io/utils v0.0.0-20241210054802-24370beab758
sigs.k8s.io/controller-runtime v0.0.0-00010101000000-000000000000
sigs.k8s.io/yaml v1.4.0
)
require (
- cloud.google.com/go v0.115.0 // indirect
- cloud.google.com/go/auth v0.8.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.4 // indirect
- cloud.google.com/go/compute/metadata v0.5.0 // indirect
- cloud.google.com/go/iam v1.1.13 // indirect
+ cel.dev/expr v0.18.0 // indirect
+ cloud.google.com/go v0.117.0 // indirect
+ cloud.google.com/go/auth v0.13.0 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.6 // indirect
+ cloud.google.com/go/compute/metadata v0.6.0 // indirect
+ cloud.google.com/go/iam v1.3.0 // indirect
+ cloud.google.com/go/monitoring v1.22.0 // indirect
filippo.io/edwards25519 v1.1.0 // indirect
- git.sr.ht/~sbinet/gg v0.5.0 // indirect
- github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ git.sr.ht/~sbinet/gg v0.6.0 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 // indirect
+ github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 // indirect
github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
+ github.com/blang/semver/v4 v4.0.0 // indirect
github.com/campoy/embedmd v1.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
+ github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
- github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78 // indirect
+ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
- github.com/emicklei/go-restful/v3 v3.12.1 // indirect
- github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+ github.com/emicklei/go-restful/v3 v3.11.0 // indirect
+ github.com/envoyproxy/go-control-plane v0.13.1 // indirect
+ github.com/envoyproxy/protoc-gen-validate v1.1.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/fxamacker/cbor/v2 v2.7.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/go-fonts/liberation v0.3.3 // indirect
- github.com/go-latex/latex v0.0.0-20231108140139-5c1ce85aa4ea // indirect
+ github.com/go-latex/latex v0.0.0-20240709081214-31cef3c7570e // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
- github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect
github.com/go-pdf/fpdf v0.9.0 // indirect
+ github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-toolsmith/astcopy v1.0.2 // indirect
github.com/go-toolsmith/astequal v1.1.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
@@ -458,20 +471,19 @@ require (
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect
github.com/golang/snappy v0.0.4 // indirect
- github.com/google/btree v1.1.2 // indirect
- github.com/google/gnostic-models v0.6.8 // indirect
+ github.com/google/btree v1.1.3 // indirect
+ github.com/google/gnostic-models v0.6.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect
- github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 // indirect
+ github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect
github.com/google/s2a-go v0.1.8 // indirect
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
github.com/google/wire v0.6.0 // indirect
- github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
- github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
- github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
- github.com/grpc-ecosystem/grpc-gateway/v2 v2.20.0 // indirect
+ github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
+ github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 // indirect
github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
- github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
@@ -479,49 +491,54 @@ require (
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
github.com/mailru/easyjson v0.7.7 // indirect
- github.com/moby/spdystream v0.2.0 // indirect
- github.com/moby/term v0.0.0-20221205130635-1aeaba878587 // indirect
+ github.com/moby/spdystream v0.5.0 // indirect
+ github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
+ github.com/onsi/ginkgo/v2 v2.22.0 // indirect
github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/prometheus/client_golang v1.19.1 // indirect
+ github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
+ github.com/prometheus/client_golang v1.20.4 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
github.com/scylladb/go-reflectx v1.0.1 // indirect
- github.com/spf13/cobra v1.7.0 // indirect
+ github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stretchr/objx v0.5.2 // indirect
+ github.com/x448/float16 v0.8.4 // indirect
github.com/xlab/treeprint v1.2.0 // indirect
go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
- go.opentelemetry.io/proto/otlp v1.3.1 // indirect
- go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/detectors/gcp v1.31.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 // indirect
+ go.opentelemetry.io/proto/otlp v1.4.0 // indirect
+ go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.26.0 // indirect
- golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa // indirect
+ golang.org/x/crypto v0.31.0 // indirect
+ golang.org/x/exp v0.0.0-20241009180824-f66d83c29e7c // indirect
golang.org/x/exp/typeparams v0.0.0-20240213143201-ec583247a57a // indirect
- golang.org/x/image v0.19.0 // indirect
- golang.org/x/mod v0.20.0 // indirect
- golang.org/x/term v0.23.0 // indirect
- golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 // indirect
+
golang.org/x/image v0.23.0 // indirect + golang.org/x/mod v0.22.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect - google.golang.org/api v0.191.0 // indirect - google.golang.org/genproto v0.0.0-20240812133136-8ffd90a71988 // indirect + google.golang.org/api v0.211.0 // indirect + google.golang.org/genproto v0.0.0-20241118233622-e639e219e697 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/apiextensions-apiserver v0.30.1 // indirect + k8s.io/apiextensions-apiserver v0.31.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect + k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/kustomize/api v0.18.0 // indirect + sigs.k8s.io/kustomize/kyaml v0.18.1 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect ) diff --git a/go.sum b/go.sum index 309a52862a..d7b67a84bd 100644 --- a/go.sum +++ b/go.sum @@ -1,100 +1,115 @@ -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2 h1:SZRVx928rbYZ6hEKUIN+vtGDkl7uotABRWGY4OAg5gM= -buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.34.2-20240717164558-a6c49f84cc0f.2/go.mod h1:ylS4c28ACSI59oJrOdW4pHS4n0Hw4TgSPHn8rpHl4Yw= -cel.dev/expr v0.15.0 h1:O1jzfJCQBfL5BFoYktaxwIhuttaQPsVWerH9/EEKx0w= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.0-20241127180247-a33202765966.1 h1:ntAj16eF7AtUyzOOAFk5gvbAO52QmUKPKk7GmsIEORo= +buf.build/gen/go/bufbuild/protovalidate/protocolbuffers/go v1.36.0-20241127180247-a33202765966.1/go.mod h1:AxRT+qTj5PJCz2nyQzsR/qxAcveW5USRhJTt/edTO5w= cel.dev/expr v0.15.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= -cloud.google.com/go v0.115.1 h1:Jo0SM9cQnSkYfp44+v+NQXHpcHqlnRJk2qxh6yvxxxQ= -cloud.google.com/go v0.115.1/go.mod h1:DuujITeaufu3gL68/lOFIirVNJwQeyf5UXyi+Wbgknc= -cloud.google.com/go/accessapproval v1.7.11/go.mod h1:KGK3+CLDWm4BvjN0wFtZqdFUGhxlTvTF6PhAwQJGL4M= -cloud.google.com/go/accesscontextmanager v1.8.9/go.mod h1:IXvQesVgOC7aXgK9OpYFn5eWnzz8fazegIiJ5WnCOVw= -cloud.google.com/go/accesscontextmanager v1.8.11/go.mod h1:nwPysISS3KR5qXipAU6cW/UbDavDdTBBgPohbkhGSok= -cloud.google.com/go/aiplatform v1.68.0/go.mod h1:105MFA3svHjC3Oazl7yjXAmIR89LKhRAeNdnDKJczME= -cloud.google.com/go/analytics v0.23.6/go.mod h1:cFz5GwWHrWQi8OHKP9ep3Z4pvHgGcG9lPnFQ+8kXsNo= -cloud.google.com/go/apigateway v1.6.11/go.mod h1:4KsrYHn/kSWx8SNUgizvaz+lBZ4uZfU7mUDsGhmkWfM= -cloud.google.com/go/apigeeconnect v1.6.11/go.mod h1:iMQLTeKxtKL+sb0D+pFlS/TO6za2IUOh/cwMEtn/4g0= -cloud.google.com/go/apigeeregistry v0.8.9/go.mod h1:4XivwtSdfSO16XZdMEQDBCMCWDp3jkCBRhVgamQfLSA= -cloud.google.com/go/appengine v1.8.11/go.mod h1:xET3coaDUj+OP4TgnZlgQ+rG2R9fG2nblya13czP56Q= -cloud.google.com/go/area120 v0.8.11/go.mod h1:VBxJejRAJqeuzXQBbh5iHBYUkIjZk5UzFZLCXmzap2o= -cloud.google.com/go/artifactregistry v1.14.13/go.mod h1:zQ/T4xoAFPtcxshl+Q4TJBgsy7APYR/BLd2z3xEAqRA= -cloud.google.com/go/asset v1.19.5/go.mod 
h1:sqyLOYaLLfc4ACcn3YxqHno+J7lRt9NJTdO50zCUcY0= -cloud.google.com/go/assuredworkloads v1.11.11/go.mod h1:vaYs6+MHqJvLKYgZBOsuuOhBgNNIguhRU0Kt7JTGcnI= +cel.dev/expr v0.16.0/go.mod h1:TRSuuV7DlVCE/uwv5QbAiW/v8l5O8C4eEPHeu7gf7Sg= +cel.dev/expr v0.16.1/go.mod h1:AsGA5zb3WruAEQeQng1RZdGEXmBj0jvMWh6l5SnNuC8= +cel.dev/expr v0.16.2/go.mod h1:gXngZQMkWJoSbE8mOzehJlXQyubn/Vg0vR9/F3W7iw8= +cel.dev/expr v0.18.0 h1:CJ6drgk+Hf96lkLikr4rFf19WrU0BOWEihyZnI2TAzo= +cel.dev/expr v0.18.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw= +cloud.google.com/go v0.117.0 h1:Z5TNFfQxj7WG2FgOGX1ekC5RiXrYgms6QscOm32M/4s= +cloud.google.com/go v0.117.0/go.mod h1:ZbwhVTb1DBGt2Iwb3tNO6SEK4q+cplHZmLWH+DelYYc= +cloud.google.com/go/accessapproval v1.8.2/go.mod h1:aEJvHZtpjqstffVwF/2mCXXSQmpskyzvw6zKLvLutZM= +cloud.google.com/go/accesscontextmanager v1.9.1/go.mod h1:wUVSoz8HmG7m9miQTh6smbyYuNOJrvZukK5g6WxSOp0= +cloud.google.com/go/accesscontextmanager v1.9.2/go.mod h1:T0Sw/PQPyzctnkw1pdmGAKb7XBA84BqQzH0fSU7wzJU= +cloud.google.com/go/aiplatform v1.69.0/go.mod h1:nUsIqzS3khlnWvpjfJbP+2+h+VrFyYsTm7RNCAViiY8= +cloud.google.com/go/analytics v0.25.2/go.mod h1:th0DIunqrhI1ZWVlT3PH2Uw/9ANX8YHfFDEPqf/+7xM= +cloud.google.com/go/apigateway v1.7.2/go.mod h1:+weId+9aR9J6GRwDka7jIUSrKEX60XGcikX7dGU8O7M= +cloud.google.com/go/apigeeconnect v1.7.2/go.mod h1:he/SWi3A63fbyxrxD6jb67ak17QTbWjva1TFbT5w8Kw= +cloud.google.com/go/apigeeregistry v0.9.2/go.mod h1:A5n/DwpG5NaP2fcLYGiFA9QfzpQhPRFNATO1gie8KM8= +cloud.google.com/go/appengine v1.9.2/go.mod h1:bK4dvmMG6b5Tem2JFZcjvHdxco9g6t1pwd3y/1qr+3s= +cloud.google.com/go/area120 v0.9.2/go.mod h1:Ar/KPx51UbrTWGVGgGzFnT7hFYQuk/0VOXkvHdTbQMI= +cloud.google.com/go/artifactregistry v1.16.0/go.mod h1:LunXo4u2rFtvJjrGjO0JS+Gs9Eco2xbZU6JVJ4+T8Sk= +cloud.google.com/go/asset v1.20.3/go.mod h1:797WxTDwdnFAJzbjZ5zc+P5iwqXc13yO9DHhmS6wl+o= +cloud.google.com/go/assuredworkloads v1.12.2/go.mod h1:/WeRr/q+6EQYgnoYrqCVgw7boMoDfjXZZev3iJxs2Iw= cloud.google.com/go/auth v0.2.1/go.mod h1:khQRBNrvNoHiHhV1iu2x8fSnlNbCaVHilznW5MAI5GY= cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go/auth v0.4.1/go.mod h1:QVBuVEKpCn4Zp58hzRGvL0tjRGU0YqdRTdCHM1IHnro= cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= -cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= -cloud.google.com/go/auth v0.6.0/go.mod h1:b4acV+jLQDyjwm4OXHYjNvRi4jvGBzHWJRtJcy+2P4g= cloud.google.com/go/auth v0.6.1/go.mod h1:eFHG7zDzbXHKmjJddFG/rBlcGp6t25SwRUiEQSlO4x4= cloud.google.com/go/auth v0.7.0/go.mod h1:D+WqdrpcjmiCgWrXmLLxOVq1GACoE36chW6KXoEvuIw= cloud.google.com/go/auth v0.7.2/go.mod h1:VEc4p5NNxycWQTMQEDQF0bd6aTMb6VgYDXEwiJJQAbs= -cloud.google.com/go/auth v0.8.0/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= -cloud.google.com/go/auth v0.8.1 h1:QZW9FjC5lZzN864p13YxvAtGUlQ+KgRL+8Sg45Z6vxo= -cloud.google.com/go/auth v0.8.1/go.mod h1:qGVp/Y3kDRSDZ5gFD/XPUfYQ9xW1iI7q8RIRoCyBbJc= +cloud.google.com/go/auth v0.9.0/go.mod h1:2HsApZBr9zGZhC9QAXsYVYaWk8kNUt37uny+XVKi7wM= +cloud.google.com/go/auth v0.9.3/go.mod h1:7z6VY+7h3KUdRov5F1i8NDP5ZzWKYmEPO842BgCsmTk= +cloud.google.com/go/auth v0.9.4/go.mod h1:SHia8n6//Ya940F1rLimhJCjjx7KE17t0ctFEci3HkA= +cloud.google.com/go/auth v0.9.9/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth v0.10.2/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth v0.11.0/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI= +cloud.google.com/go/auth 
v0.12.1/go.mod h1:BFMu+TNpF3DmvfBO9ClqTR/SiqVIm7LukKF9mbendF4= +cloud.google.com/go/auth v0.13.0 h1:8Fu8TZy167JkW8Tj3q7dIkr2v4cndv41ouecJx0PAHs= +cloud.google.com/go/auth v0.13.0/go.mod h1:COOjD9gwfKNKz+IIduatIhYJQIc0mG3H102r/EMxX6Q= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= -cloud.google.com/go/auth/oauth2adapt v0.2.3/go.mod h1:tMQXOfZzFuNuUxOypHlQEXgdfX5cuhwU+ffUuXRJE8I= -cloud.google.com/go/auth/oauth2adapt v0.2.4 h1:0GWE/FUsXhf6C+jAkWgYm7X9tK8cuEIfy19DBn6B6bY= cloud.google.com/go/auth/oauth2adapt v0.2.4/go.mod h1:jC/jOpwFP6JBxhB3P5Rr0a9HLMC/Pe3eaL4NmdvqPtc= -cloud.google.com/go/automl v1.13.11/go.mod h1:oMJdXRDOVC+Eq3PnGhhxSut5Hm9TSyVx1aLEOgerOw8= -cloud.google.com/go/baremetalsolution v1.2.10/go.mod h1:eO2c2NMRy5ytcNPhG78KPsWGNsX5W/tUsCOWmYihx6I= -cloud.google.com/go/batch v1.9.2/go.mod h1:smqwS4sleDJVAEzBt/TzFfXLktmWjFNugGDWl8coKX4= -cloud.google.com/go/beyondcorp v1.0.10/go.mod h1:G09WxvxJASbxbrzaJUMVvNsB1ZiaKxpbtkjiFtpDtbo= -cloud.google.com/go/bigquery v1.62.0/go.mod h1:5ee+ZkF1x/ntgCsFQJAQTM3QkAZOecfCmvxhkJsWRSA= +cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/auth/oauth2adapt v0.2.6 h1:V6a6XDu2lTwPZWOawrAa9HUK+DB2zfJyTuciBG5hFkU= +cloud.google.com/go/auth/oauth2adapt v0.2.6/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8= +cloud.google.com/go/automl v1.14.3/go.mod h1:XBkHTOSBIXNLrGgz9zHImy3wNAx9mHo6FLWWqDygrTk= +cloud.google.com/go/baremetalsolution v1.3.2/go.mod h1:3+wqVRstRREJV/puwaKAH3Pnn7ByreZG2aFRsavnoBQ= +cloud.google.com/go/batch v1.11.4/go.mod h1:l7i656a/EGqpzgEaCEMcPwh49dgFeor4KN4BK//V1Po= +cloud.google.com/go/beyondcorp v1.1.2/go.mod h1:q6YWSkEsSZTU2WDt1qtz6P5yfv79wgktGtNbd0FJTLI= +cloud.google.com/go/bigquery v1.65.0/go.mod h1:9WXejQ9s5YkTW4ryDYzKXBooL78u5+akWGXgJqQkY6A= cloud.google.com/go/bigtable v1.18.1/go.mod h1:NAVyfJot9jlo+KmgWLUJ5DJGwNDoChzAcrecLpmuAmY= cloud.google.com/go/bigtable v1.20.0/go.mod h1:upJDn8frsjzpRMfybiWkD1PG6WCCL7CRl26MgVeoXY4= -cloud.google.com/go/bigtable v1.27.2-0.20240802230159-f371928b558f/go.mod h1:avmXcmxVbLJAo9moICRYMgDyTTPoV0MA0lHKnyqV4fQ= -cloud.google.com/go/billing v1.18.9/go.mod h1:bKTnh8MBfCMUT1fzZ936CPN9rZG7ZEiHB2J3SjIjByc= -cloud.google.com/go/binaryauthorization v1.8.7/go.mod h1:cRj4teQhOme5SbWQa96vTDATQdMftdT5324BznxANtg= -cloud.google.com/go/certificatemanager v1.8.5/go.mod h1:r2xINtJ/4xSz85VsqvjY53qdlrdCjyniib9Jp98ZKKM= -cloud.google.com/go/channel v1.17.11/go.mod h1:gjWCDBcTGQce/BSMoe2lAqhlq0dIRiZuktvBKXUawp0= -cloud.google.com/go/cloudbuild v1.16.5/go.mod h1:HXLpZ8QeYZgmDIWpbl9Gs22p6o6uScgQ/cV9HF9cIZU= -cloud.google.com/go/clouddms v1.7.10/go.mod h1:PzHELq0QDyA7VaD9z6mzh2mxeBz4kM6oDe8YxMxd4RA= -cloud.google.com/go/cloudtasks v1.12.12/go.mod h1:8UmM+duMrQpzzRREo0i3x3TrFjsgI/3FQw3664/JblA= -cloud.google.com/go/compute v1.27.5/go.mod h1:DfwDGujFTdSeiE8b8ZqadF/uxHFBz+ekGsk8Zfi9dTA= +cloud.google.com/go/bigtable v1.33.0/go.mod h1:HtpnH4g25VT1pejHRtInlFPnN5sjTxbQlsYBjh9t5l0= +cloud.google.com/go/billing v1.20.0/go.mod h1:AAtih/X2nka5mug6jTAq8jfh1nPye0OjkHbZEZgU59c= +cloud.google.com/go/binaryauthorization v1.9.2/go.mod h1:T4nOcRWi2WX4bjfSRXJkUnpliVIqjP38V88Z10OvEv4= +cloud.google.com/go/certificatemanager v1.9.2/go.mod h1:PqW+fNSav5Xz8bvUnJpATIRo1aaABP4mUg/7XIeAn6c= +cloud.google.com/go/channel v1.19.1/go.mod h1:ungpP46l6XUeuefbA/XWpWWnAY3897CSRPXUbDstwUo= +cloud.google.com/go/cloudbuild v1.19.1/go.mod h1:VIq8XLI8tixd3YpySXxQ/tqJMcewMYRXqsMAXbdKCt4= +cloud.google.com/go/clouddms v1.8.2/go.mod 
h1:pe+JSp12u4mYOkwXpSMouyCCuQHL3a6xvWH2FgOcAt4= +cloud.google.com/go/cloudtasks v1.13.2/go.mod h1:2pyE4Lhm7xY8GqbZKLnYk7eeuh8L0JwAvXx1ecKxYu8= +cloud.google.com/go/compute v1.31.0/go.mod h1:4SCUCDAvOQvMGu4ze3YIJapnY0UQa5+WvJJeYFsQRoo= cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.4.0/go.mod h1:SIQh1Kkb4ZJ8zJ874fqVkslA29PRXuleyj6vOzlbK7M= -cloud.google.com/go/compute/metadata v0.5.0 h1:Zr0eK8JbFv6+Wi4ilXAR8FJ3wyNdpxHKJNPos6LTZOY= cloud.google.com/go/compute/metadata v0.5.0/go.mod h1:aHnloV2TPI38yx4s9+wAZhHykWvVCfu7hQbF+9CWoiY= -cloud.google.com/go/contactcenterinsights v1.13.6/go.mod h1:mL+DbN3pMQGaAbDC4wZhryLciwSwHf5Tfk4Itr72Zyk= -cloud.google.com/go/container v1.38.0/go.mod h1:U0uPBvkVWOJGY/0qTVuPS7NeafFEUsHSPqT5pB8+fCY= -cloud.google.com/go/containeranalysis v0.12.1/go.mod h1:+/lcJIQSFt45TC0N9Nq7/dPbl0isk6hnC4EvBBqyXsM= -cloud.google.com/go/datacatalog v1.20.3/go.mod h1:AKC6vAy5urnMg5eJK3oUjy8oa5zMbiY33h125l8lmlo= -cloud.google.com/go/datacatalog v1.21.0/go.mod h1:DB0QWF9nelpsbB0eR/tA0xbHZZMvpoFD1XFy3Qv/McI= -cloud.google.com/go/dataflow v0.9.11/go.mod h1:CCLufd7I4pPfyp54qMgil/volrL2ZKYjXeYLfQmBGJs= -cloud.google.com/go/dataform v0.9.8/go.mod h1:cGJdyVdunN7tkeXHPNosuMzmryx55mp6cInYBgxN3oA= -cloud.google.com/go/datafusion v1.7.11/go.mod h1:aU9zoBHgYmoPp4dzccgm/Gi4xWDMXodSZlNZ4WNeptw= -cloud.google.com/go/datalabeling v0.8.11/go.mod h1:6IGUV3z7hlkAU5ndKVshv/8z+7pxE+k0qXsEjyzO1Xg= -cloud.google.com/go/dataplex v1.18.2/go.mod h1:NuBpJJMGGQn2xctX+foHEDKRbizwuiHJamKvvSteY3Q= -cloud.google.com/go/dataproc/v2 v2.5.3/go.mod h1:RgA5QR7v++3xfP7DlgY3DUmoDSTaaemPe0ayKrQfyeg= -cloud.google.com/go/dataqna v0.8.11/go.mod h1:74Icl1oFKKZXPd+W7YDtqJLa+VwLV6wZ+UF+sHo2QZQ= -cloud.google.com/go/datastore v1.17.1/go.mod h1:mtzZ2HcVtz90OVrEXXGDc2pO4NM1kiBQy8YV4qGe0ZM= -cloud.google.com/go/datastream v1.10.10/go.mod h1:NqchuNjhPlISvWbk426/AU/S+Kgv7srlID9P5XOAbtg= -cloud.google.com/go/deploy v1.21.0/go.mod h1:PaOfS47VrvmYnxG5vhHg0KU60cKeWcqyLbMBjxS8DW8= -cloud.google.com/go/dialogflow v1.55.0/go.mod h1:0u0hSlJiFpMkMpMNoFrQETwDjaRm8Q8hYKv+jz5JeRA= -cloud.google.com/go/dlp v1.16.0/go.mod h1:LtPZxZAenBXKzvWIOB2hdHIXuEcK0wW0En8//u+/nNA= -cloud.google.com/go/documentai v1.31.0/go.mod h1:5ajlDvaPyl9tc+K/jZE8WtYIqSXqAD33Z1YAYIjfad4= -cloud.google.com/go/domains v0.9.11/go.mod h1:efo5552kUyxsXEz30+RaoIS2lR7tp3M/rhiYtKXkhkk= -cloud.google.com/go/edgecontainer v1.2.5/go.mod h1:OAb6tElD3F3oBujFAup14PKOs9B/lYobTb6LARmoACY= -cloud.google.com/go/errorreporting v0.3.1/go.mod h1:6xVQXU1UuntfAf+bVkFk6nld41+CPyF2NSPCyXE3Ztk= -cloud.google.com/go/essentialcontacts v1.6.12/go.mod h1:UGhWTIYewH8Ma4wDRJp8cMAHUCeAOCKsuwd6GLmmQLc= -cloud.google.com/go/eventarc v1.13.10/go.mod h1:KlCcOMApmUaqOEZUpZRVH+p0nnnsY1HaJB26U4X5KXE= -cloud.google.com/go/filestore v1.8.7/go.mod h1:dKfyH0YdPAKdYHqAR/bxZeil85Y5QmrEVQwIYuRjcXI= -cloud.google.com/go/firestore v1.16.0/go.mod h1:+22v/7p+WNBSQwdSwP57vz47aZiY+HrDkrOsJNhk7rg= -cloud.google.com/go/functions v1.16.6/go.mod h1:wOzZakhMueNQaBUJdf0yjsJIe0GBRu+ZTvdSTzqHLs0= -cloud.google.com/go/gkebackup v1.5.4/go.mod h1:V+llvHlRD0bCyrkYaAMJX+CHralceQcaOWjNQs8/Ymw= -cloud.google.com/go/gkeconnect v0.8.11/go.mod h1:ejHv5ehbceIglu1GsMwlH0nZpTftjxEY6DX7tvaM8gA= -cloud.google.com/go/gkehub v0.14.11/go.mod 
h1:CsmDJ4qbBnSPkoBltEubK6qGOjG0xNfeeT5jI5gCnRQ= -cloud.google.com/go/gkemulticloud v1.2.4/go.mod h1:PjTtoKLQpIRztrL+eKQw8030/S4c7rx/WvHydDJlpGE= -cloud.google.com/go/grafeas v0.3.6/go.mod h1:to6ECAPgRO2xeqD8ISXHc70nObJuaKZThreQOjeOH3o= -cloud.google.com/go/gsuiteaddons v1.6.11/go.mod h1:U7mk5PLBzDpHhgHv5aJkuvLp9RQzZFpa8hgWAB+xVIk= -cloud.google.com/go/iam v1.1.13 h1:7zWBXG9ERbMLrzQBRhFliAV+kjcRToDTgQT3CTwYyv4= -cloud.google.com/go/iam v1.1.13/go.mod h1:K8mY0uSXwEXS30KrnVb+j54LB/ntfZu1dr+4zFMNbus= -cloud.google.com/go/iap v1.9.10/go.mod h1:pO0FEirrhMOT1H0WVwpD5dD9r3oBhvsunyBQtNXzzc0= -cloud.google.com/go/ids v1.4.11/go.mod h1:+ZKqWELpJm8WcRRsSvKZWUdkriu4A3XsLLzToTv3418= -cloud.google.com/go/iot v1.7.11/go.mod h1:0vZJOqFy9kVLbUXwTP95e0dWHakfR4u5IWqsKMGIfHk= -cloud.google.com/go/kms v1.18.5/go.mod h1:yXunGUGzabH8rjUPImp2ndHiGolHeWJJ0LODLedicIY= -cloud.google.com/go/language v1.13.0/go.mod h1:B9FbD17g1EkilctNGUDAdSrBHiFOlKNErLljO7jplDU= -cloud.google.com/go/lifesciences v0.9.11/go.mod h1:NMxu++FYdv55TxOBEvLIhiAvah8acQwXsz79i9l9/RY= -cloud.google.com/go/logging v1.11.0/go.mod h1:5LDiJC/RxTt+fHc1LAt20R9TKiUTReDg6RuuFOZ67+A= +cloud.google.com/go/compute/metadata v0.5.1/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k= +cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I= +cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg= +cloud.google.com/go/contactcenterinsights v1.16.0/go.mod h1:cFGxDVm/OwEVAHbU9UO4xQCtQFn0RZSrSUcF/oJ0Bbs= +cloud.google.com/go/container v1.42.0/go.mod h1:YL6lDgCUi3frIWNIFU9qrmF7/6K1EYrtspmFTyyqJ+k= +cloud.google.com/go/containeranalysis v0.13.2/go.mod h1:AiKvXJkc3HiqkHzVIt6s5M81wk+q7SNffc6ZlkTDgiE= +cloud.google.com/go/datacatalog v1.23.0/go.mod h1:9Wamq8TDfL2680Sav7q3zEhBJSPBrDxJU8WtPJ25dBM= +cloud.google.com/go/datacatalog v1.24.0/go.mod h1:9Wamq8TDfL2680Sav7q3zEhBJSPBrDxJU8WtPJ25dBM= +cloud.google.com/go/dataflow v0.10.2/go.mod h1:+HIb4HJxDCZYuCqDGnBHZEglh5I0edi/mLgVbxDf0Ag= +cloud.google.com/go/dataform v0.10.2/go.mod h1:oZHwMBxG6jGZCVZqqMx+XWXK+dA/ooyYiyeRbUxI15M= +cloud.google.com/go/datafusion v1.8.2/go.mod h1:XernijudKtVG/VEvxtLv08COyVuiYPraSxm+8hd4zXA= +cloud.google.com/go/datalabeling v0.9.2/go.mod h1:8me7cCxwV/mZgYWtRAd3oRVGFD6UyT7hjMi+4GRyPpg= +cloud.google.com/go/dataplex v1.20.0/go.mod h1:vsxxdF5dgk3hX8Ens9m2/pMNhQZklUhSgqTghZtF1v4= +cloud.google.com/go/dataproc/v2 v2.10.0/go.mod h1:HD16lk4rv2zHFhbm8gGOtrRaFohMDr9f0lAUMLmg1PM= +cloud.google.com/go/dataqna v0.9.2/go.mod h1:WCJ7pwD0Mi+4pIzFQ+b2Zqy5DcExycNKHuB+VURPPgs= +cloud.google.com/go/datastore v1.20.0/go.mod h1:uFo3e+aEpRfHgtp5pp0+6M0o147KoPaYNaPAKpfh8Ew= +cloud.google.com/go/datastream v1.12.0/go.mod h1:RnFWa5zwR5SzHxeZGJOlQ4HKBQPcjGfD219Qy0qfh2k= +cloud.google.com/go/deploy v1.26.0/go.mod h1:h9uVCWxSDanXUereI5WR+vlZdbPJ6XGy+gcfC25v5rM= +cloud.google.com/go/dialogflow v1.63.0/go.mod h1:ilj5xjY1TRklKLle9ucy5ZiguwgeEIzqeJFIniKO5ng= +cloud.google.com/go/dlp v1.20.0/go.mod h1:nrGsA3r8s7wh2Ct9FWu69UjBObiLldNyQda2RCHgdaY= +cloud.google.com/go/documentai v1.35.0/go.mod h1:ZotiWUlDE8qXSUqkJsGMQqVmfTMYATwJEYqbPXTR9kk= +cloud.google.com/go/domains v0.10.2/go.mod h1:oL0Wsda9KdJvvGNsykdalHxQv4Ri0yfdDkIi3bzTUwk= +cloud.google.com/go/edgecontainer v1.4.0/go.mod h1:Hxj5saJT8LMREmAI9tbNTaBpW5loYiWFyisCjDhzu88= +cloud.google.com/go/errorreporting v0.3.2/go.mod h1:s5kjs5r3l6A8UUyIsgvAhGq6tkqyBCUss0FRpsoVTww= 
+cloud.google.com/go/essentialcontacts v1.7.2/go.mod h1:NoCBlOIVteJFJU+HG9dIG/Cc9kt1K9ys9mbOaGPUmPc= +cloud.google.com/go/eventarc v1.15.0/go.mod h1:PAd/pPIZdJtJQFJI1yDEUms1mqohdNuM1BFEVHHlVFg= +cloud.google.com/go/filestore v1.9.2/go.mod h1:I9pM7Hoetq9a7djC1xtmtOeHSUYocna09ZP6x+PG1Xw= +cloud.google.com/go/firestore v1.17.0/go.mod h1:69uPx1papBsY8ZETooc71fOhoKkD70Q1DwMrtKuOT/Y= +cloud.google.com/go/functions v1.19.2/go.mod h1:SBzWwWuaFDLnUyStDAMEysVN1oA5ECLbP3/PfJ9Uk7Y= +cloud.google.com/go/gkebackup v1.6.2/go.mod h1:WsTSWqKJkGan1pkp5dS30oxb+Eaa6cLvxEUxKTUALwk= +cloud.google.com/go/gkeconnect v0.12.0/go.mod h1:zn37LsFiNZxPN4iO7YbUk8l/E14pAJ7KxpoXoxt7Ly0= +cloud.google.com/go/gkehub v0.15.2/go.mod h1:8YziTOpwbM8LM3r9cHaOMy2rNgJHXZCrrmGgcau9zbQ= +cloud.google.com/go/gkemulticloud v1.4.1/go.mod h1:KRvPYcx53bztNwNInrezdfNF+wwUom8Y3FuJBwhvFpQ= +cloud.google.com/go/grafeas v0.3.11/go.mod h1:dcQyG2+T4tBgG0MvJAh7g2wl/xHV2w+RZIqivwuLjNg= +cloud.google.com/go/gsuiteaddons v1.7.2/go.mod h1:GD32J2rN/4APilqZw4JKmwV84+jowYYMkEVwQEYuAWc= +cloud.google.com/go/iam v1.3.0 h1:4Wo2qTaGKFtajbLpF6I4mywg900u3TLlHDb6mriLDPU= +cloud.google.com/go/iam v1.3.0/go.mod h1:0Ys8ccaZHdI1dEUilwzqng/6ps2YB6vRsjIe00/+6JY= +cloud.google.com/go/iap v1.10.2/go.mod h1:cClgtI09VIfazEK6VMJr6bX8KQfuQ/D3xqX+d0wrUlI= +cloud.google.com/go/ids v1.5.2/go.mod h1:P+ccDD96joXlomfonEdCnyrHvE68uLonc7sJBPVM5T0= +cloud.google.com/go/iot v1.8.2/go.mod h1:UDwVXvRD44JIcMZr8pzpF3o4iPsmOO6fmbaIYCAg1ww= +cloud.google.com/go/kms v1.20.3/go.mod h1:YvX+xhp2E2Sc3vol5IcRlBhH14Ecl3kegUY/DtH7EWQ= +cloud.google.com/go/language v1.14.2/go.mod h1:dviAbkxT9art+2ioL9AM05t+3Ql6UPfMpwq1cDsF+rg= +cloud.google.com/go/lifesciences v0.10.2/go.mod h1:vXDa34nz0T/ibUNoeHnhqI+Pn0OazUTdxemd0OLkyoY= +cloud.google.com/go/logging v1.10.0/go.mod h1:EHOwcxlltJrYGqMGfghSet736KR3hX1MAj614mrMk9I= +cloud.google.com/go/logging v1.12.0 h1:ex1igYcGFd4S/RZWOCU51StlIEuey5bjqwH9ZYjHibk= +cloud.google.com/go/logging v1.12.0/go.mod h1:wwYBt5HlYP1InnrtYI0wtwttpVU1rifnMT7RejksUAM= cloud.google.com/go/longrunning v0.1.1/go.mod h1:UUFxuDWkv22EuY93jjmDMFT5GPQKeFVJBIF6QlTqdsE= cloud.google.com/go/longrunning v0.4.1/go.mod h1:4iWDqhBZ70CvZ6BfETbvam3T8FMvLK+eFj0E6AaRQTo= cloud.google.com/go/longrunning v0.5.1/go.mod h1:spvimkwdz6SPWKEt/XBij79E9fiTkHSQl/fRUUQJYJc= @@ -102,68 +117,71 @@ cloud.google.com/go/longrunning v0.5.2/go.mod h1:nqo6DQbNV2pXhGDbDMoN2bWz68MjZUz cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA= cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= cloud.google.com/go/longrunning v0.5.9/go.mod h1:HD+0l9/OOW0za6UWdKJtXoFAX/BGg/3Wj8p10NeWF7c= -cloud.google.com/go/longrunning v0.5.10/go.mod h1:tljz5guTr5oc/qhlUjBlk7UAIFMOGuPNxkNDZXlLics= -cloud.google.com/go/longrunning v0.5.11/go.mod h1:rDn7//lmlfWV1Dx6IB4RatCPenTwwmqXuiP0/RgoEO4= -cloud.google.com/go/longrunning v0.5.12 h1:5LqSIdERr71CqfUsFlJdBpOkBH8FBCFD7P1nTWy3TYE= -cloud.google.com/go/longrunning v0.5.12/go.mod h1:S5hMV8CDJ6r50t2ubVJSKQVv5u0rmik5//KgLO3k4lU= -cloud.google.com/go/managedidentities v1.6.11/go.mod h1:df+8oZ1D4Eri+NrcpuiR5Hd6MGgiMqn0ZCzNmBYPS0A= -cloud.google.com/go/maps v1.11.6/go.mod h1:MOS/NN0L6b7Kumr8bLux9XTpd8+D54DYxBMUjq+XfXs= -cloud.google.com/go/mediatranslation v0.8.11/go.mod h1:3sNEm0fx61eHk7rfzBzrljVV9XKr931xI3OFacQBVFg= -cloud.google.com/go/memcache v1.10.11/go.mod h1:ubJ7Gfz/xQawQY5WO5pht4Q0dhzXBFeEszAeEJnwBHU= -cloud.google.com/go/metastore v1.13.10/go.mod h1:RPhMnBxUmTLT1fN7fNbPqtH5EoGHueDxubmJ1R1yT84= 
-cloud.google.com/go/monitoring v1.20.4/go.mod h1:v7F/UcLRw15EX7xq565N7Ae5tnYEE28+Cl717aTXG4c= -cloud.google.com/go/networkconnectivity v1.14.10/go.mod h1:f7ZbGl4CV08DDb7lw+NmMXQTKKjMhgCEEwFbEukWuOY= -cloud.google.com/go/networkmanagement v1.13.6/go.mod h1:WXBijOnX90IFb6sberjnGrVtZbgDNcPDUYOlGXmG8+4= -cloud.google.com/go/networksecurity v0.9.11/go.mod h1:4xbpOqCwplmFgymAjPFM6ZIplVC6+eQ4m7sIiEq9oJA= -cloud.google.com/go/notebooks v1.11.9/go.mod h1:JmnRX0eLgHRJiyxw8HOgumW9iRajImZxr7r75U16uXw= -cloud.google.com/go/optimization v1.6.9/go.mod h1:mcvkDy0p4s5k7iSaiKrwwpN0IkteHhGmuW5rP9nXA5M= -cloud.google.com/go/orchestration v1.9.6/go.mod h1:gQvdIsHESZJigimnbUA8XLbYeFlSg/z+A7ppds5JULg= -cloud.google.com/go/orgpolicy v1.12.5/go.mod h1:f778/jOHKp6cP6NbbQgjy4SDfQf6BoVGiSWdxky3ONQ= -cloud.google.com/go/orgpolicy v1.12.7/go.mod h1:Os3GlUFRPf1UxOHTup5b70BARnhHeQNNVNZzJXPbWYI= -cloud.google.com/go/osconfig v1.13.0/go.mod h1:tlACnQi1rtSLnHRYzfw9SH9zXs0M7S1jqiW2EOCn2Y0= -cloud.google.com/go/osconfig v1.13.2/go.mod h1:eupylkWQJCwSIEMkpVR4LqpgKkQi0mD4m1DzNCgpQso= -cloud.google.com/go/oslogin v1.13.7/go.mod h1:xq027cL0fojpcEcpEQdWayiDn8tIx3WEFYMM6+q7U+E= -cloud.google.com/go/phishingprotection v0.8.11/go.mod h1:Mge0cylqVFs+D0EyxlsTOJ1Guf3qDgrztHzxZqkhRQM= -cloud.google.com/go/policytroubleshooter v1.10.9/go.mod h1:X8HEPVBWz8E+qwI/QXnhBLahEHdcuPO3M9YvSj0LDek= -cloud.google.com/go/privatecatalog v0.9.11/go.mod h1:awEF2a8M6UgoqVJcF/MthkF8SSo6OoWQ7TtPNxUlljY= -cloud.google.com/go/pubsub v1.41.0/go.mod h1:g+YzC6w/3N91tzG66e2BZtp7WrpBBMXVa3Y9zVoOGpk= +cloud.google.com/go/longrunning v0.6.0/go.mod h1:uHzSZqW89h7/pasCWNYdUpwGz3PcVWhrWupreVPYLts= +cloud.google.com/go/longrunning v0.6.1/go.mod h1:nHISoOZpBcmlwbJmiVk5oDRz0qG/ZxPynEGs1iZ79s0= +cloud.google.com/go/longrunning v0.6.2/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/longrunning v0.6.3 h1:A2q2vuyXysRcwzqDpMMLSI6mb6o39miS52UEG/Rd2ng= +cloud.google.com/go/longrunning v0.6.3/go.mod h1:k/vIs83RN4bE3YCswdXC5PFfWVILjm3hpEUlSko4PiI= +cloud.google.com/go/managedidentities v1.7.2/go.mod h1:t0WKYzagOoD3FNtJWSWcU8zpWZz2i9cw2sKa9RiPx5I= +cloud.google.com/go/maps v1.17.0/go.mod h1:7LSQFPyfIrX7fAlLSUFYHmKCnJy0QYclWhm3UsfsZYw= +cloud.google.com/go/mediatranslation v0.9.2/go.mod h1:1xyRoDYN32THzy+QaU62vIMciX0CFexplju9t30XwUc= +cloud.google.com/go/memcache v1.11.2/go.mod h1:jIzHn79b0m5wbkax2SdlW5vNSbpaEk0yWHbeLpMIYZE= +cloud.google.com/go/metastore v1.14.2/go.mod h1:dk4zOBhZIy3TFOQlI8sbOa+ef0FjAcCHEnd8dO2J+LE= +cloud.google.com/go/monitoring v1.22.0 h1:mQ0040B7dpuRq1+4YiQD43M2vW9HgoVxY98xhqGT+YI= +cloud.google.com/go/monitoring v1.22.0/go.mod h1:hS3pXvaG8KgWTSz+dAdyzPrGUYmi2Q+WFX8g2hqVEZU= +cloud.google.com/go/networkconnectivity v1.16.0/go.mod h1:N1O01bEk5z9bkkWwXLKcN2T53QN49m/pSpjfUvlHDQY= +cloud.google.com/go/networkmanagement v1.17.0/go.mod h1:Yc905R9U5jik5YMt76QWdG5WqzPU4ZsdI/mLnVa62/Q= +cloud.google.com/go/networksecurity v0.10.2/go.mod h1:puU3Gwchd6Y/VTyMkL50GI2RSRMS3KXhcDBY1HSOcck= +cloud.google.com/go/notebooks v1.12.2/go.mod h1:EkLwv8zwr8DUXnvzl944+sRBG+b73HEKzV632YYAGNI= +cloud.google.com/go/optimization v1.7.2/go.mod h1:msYgDIh1SGSfq6/KiWJQ/uxMkWq8LekPyn1LAZ7ifNE= +cloud.google.com/go/orchestration v1.11.2/go.mod h1:ESdQV8u+75B+uNf5PBwJC9Qn+SNT8kkiP3FFFN5nns4= +cloud.google.com/go/orgpolicy v1.14.0/go.mod h1:S6Pveh1JOxpSbs6+2ToJG7h3HwqC6Uf1YQ6JYG7wdM8= +cloud.google.com/go/orgpolicy v1.14.1/go.mod h1:1z08Hsu1mkoH839X7C8JmnrqOkp2IZRSxiDw7W/Xpg4= +cloud.google.com/go/osconfig v1.14.1/go.mod 
h1:Rk62nyQscgy8x4bICaTn0iWiip5EpwEfG2UCBa2TP/s= +cloud.google.com/go/osconfig v1.14.2/go.mod h1:kHtsm0/j8ubyuzGciBsRxFlbWVjc4c7KdrwJw0+g+pQ= +cloud.google.com/go/oslogin v1.14.2/go.mod h1:M7tAefCr6e9LFTrdWRQRrmMeKHbkvc4D9g6tHIjHySA= +cloud.google.com/go/phishingprotection v0.9.2/go.mod h1:mSCiq3tD8fTJAuXq5QBHFKZqMUy8SfWsbUM9NpzJIRQ= +cloud.google.com/go/policytroubleshooter v1.11.2/go.mod h1:1TdeCRv8Qsjcz2qC3wFltg/Mjga4HSpv8Tyr5rzvPsw= +cloud.google.com/go/privatecatalog v0.10.2/go.mod h1:o124dHoxdbO50ImR3T4+x3GRwBSTf4XTn6AatP8MgsQ= +cloud.google.com/go/pubsub v1.45.3/go.mod h1:cGyloK/hXC4at7smAtxFnXprKEFTqmMXNNd9w+bd94Q= cloud.google.com/go/pubsublite v1.8.2/go.mod h1:4r8GSa9NznExjuLPEJlF1VjOPOpgf3IT6k8x/YgaOPI= -cloud.google.com/go/recaptchaenterprise/v2 v2.14.2/go.mod h1:MwPgdgvBkE46aWuuXeBTCB8hQJ88p+CpXInROZYCTkc= -cloud.google.com/go/recommendationengine v0.8.11/go.mod h1:cEkU4tCXAF88a4boMFZym7U7uyxvVwcQtKzS85IbQio= -cloud.google.com/go/recommender v1.12.7/go.mod h1:lG8DVtczLltWuaCv4IVpNphONZTzaCC9KdxLYeZM5G4= -cloud.google.com/go/redis v1.16.4/go.mod h1:unCVfLP5eFrVhGLDnb7IaSaWxuZ+7cBgwwBwbdG9m9w= -cloud.google.com/go/resourcemanager v1.9.11/go.mod h1:SbNAbjVLoi2rt9G74bEYb3aw1iwvyWPOJMnij4SsmHA= -cloud.google.com/go/resourcesettings v1.7.4/go.mod h1:seBdLuyeq+ol2u9G2+74GkSjQaxaBWF+vVb6mVzQFG0= -cloud.google.com/go/retail v1.17.4/go.mod h1:oPkL1FzW7D+v/hX5alYIx52ro2FY/WPAviwR1kZZTMs= -cloud.google.com/go/run v1.4.0/go.mod h1:4G9iHLjdOC+CQ0CzA0+6nLeR6NezVPmlj+GULmb0zE4= -cloud.google.com/go/scheduler v1.10.12/go.mod h1:6DRtOddMWJ001HJ6MS148rtLSh/S2oqd2hQC3n5n9fQ= -cloud.google.com/go/secretmanager v1.13.6/go.mod h1:x2ySyOrqv3WGFRFn2Xk10iHmNmvmcEVSSqc30eb1bhw= -cloud.google.com/go/security v1.17.4/go.mod h1:KMuDJH+sEB3KTODd/tLJ7kZK+u2PQt+Cfu0oAxzIhgo= -cloud.google.com/go/securitycenter v1.33.1/go.mod h1:jeFisdYUWHr+ig72T4g0dnNCFhRwgwGoQV6GFuEwafw= -cloud.google.com/go/servicedirectory v1.11.11/go.mod h1:pnynaftaj9LmRLIc6t3r7r7rdCZZKKxui/HaF/RqYfs= -cloud.google.com/go/shell v1.7.11/go.mod h1:SywZHWac7onifaT9m9MmegYp3GgCLm+tgk+w2lXK8vg= -cloud.google.com/go/spanner v1.65.0/go.mod h1:dQGB+w5a67gtyE3qSKPPxzniedrnAmV6tewQeBY7Hxs= -cloud.google.com/go/speech v1.24.0/go.mod h1:HcVyIh5jRXM5zDMcbFCW+DF2uK/MSGN6Rastt6bj1ic= -cloud.google.com/go/storage v1.43.0 h1:CcxnSohZwizt4LCzQHWvBf1/kvtHUn7gk9QERXPyXFs= -cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0= -cloud.google.com/go/storagetransfer v1.10.10/go.mod h1:8+nX+WgQ2ZJJnK8e+RbK/zCXk8T7HdwyQAJeY7cEcm0= -cloud.google.com/go/talent v1.6.12/go.mod h1:nT9kNVuJhZX2QgqKZS6t6eCWZs5XEBYRBv6bIMnPmo4= -cloud.google.com/go/texttospeech v1.7.11/go.mod h1:Ua125HU+WT2IkIo5MzQtuNpNEk72soShJQVdorZ1SAE= -cloud.google.com/go/tpu v1.6.11/go.mod h1:W0C4xaSj1Ay3VX/H96FRvLt2HDs0CgdRPVI4e7PoCDk= -cloud.google.com/go/trace v1.10.12/go.mod h1:tYkAIta/gxgbBZ/PIzFxSH5blajgX4D00RpQqCG/GZs= +cloud.google.com/go/recaptchaenterprise/v2 v2.19.1/go.mod h1:vnbA2SpVPPwKeoFrCQxR+5a0JFRRytwBBG69Zj9pGfk= +cloud.google.com/go/recommendationengine v0.9.2/go.mod h1:DjGfWZJ68ZF5ZuNgoTVXgajFAG0yLt4CJOpC0aMK3yw= +cloud.google.com/go/recommender v1.13.2/go.mod h1:XJau4M5Re8F4BM+fzF3fqSjxNJuM66fwF68VCy/ngGE= +cloud.google.com/go/redis v1.17.2/go.mod h1:h071xkcTMnJgQnU/zRMOVKNj5J6AttG16RDo+VndoNo= +cloud.google.com/go/resourcemanager v1.10.2/go.mod h1:5f+4zTM/ZOTDm6MmPOp6BQAhR0fi8qFPnvVGSoWszcc= +cloud.google.com/go/resourcesettings v1.8.2/go.mod h1:uEgtPiMA+xuBUM4Exu+ZkNpMYP0BLlYeJbyNHfrc+U0= +cloud.google.com/go/retail v1.19.1/go.mod 
h1:W48zg0zmt2JMqmJKCuzx0/0XDLtovwzGAeJjmv6VPaE= +cloud.google.com/go/run v1.8.0/go.mod h1:IvJOg2TBb/5a0Qkc6crn5yTy5nkjcgSWQLhgO8QL8PQ= +cloud.google.com/go/scheduler v1.11.2/go.mod h1:GZSv76T+KTssX2I9WukIYQuQRf7jk1WI+LOcIEHUUHk= +cloud.google.com/go/secretmanager v1.14.2/go.mod h1:Q18wAPMM6RXLC/zVpWTlqq2IBSbbm7pKBlM3lCKsmjw= +cloud.google.com/go/security v1.18.2/go.mod h1:3EwTcYw8554iEtgK8VxAjZaq2unFehcsgFIF9nOvQmU= +cloud.google.com/go/securitycenter v1.35.2/go.mod h1:AVM2V9CJvaWGZRHf3eG+LeSTSissbufD27AVBI91C8s= +cloud.google.com/go/servicedirectory v1.12.2/go.mod h1:F0TJdFjqqotiZRlMXgIOzszaplk4ZAmUV8ovHo08M2U= +cloud.google.com/go/shell v1.8.2/go.mod h1:QQR12T6j/eKvqAQLv6R3ozeoqwJ0euaFSz2qLqG93Bs= +cloud.google.com/go/spanner v1.73.0/go.mod h1:mw98ua5ggQXVWwp83yjwggqEmW9t8rjs9Po1ohcUGW4= +cloud.google.com/go/speech v1.25.2/go.mod h1:KPFirZlLL8SqPaTtG6l+HHIFHPipjbemv4iFg7rTlYs= +cloud.google.com/go/storage v1.47.0 h1:ajqgt30fnOMmLfWfu1PWcb+V9Dxz6n+9WKjdNg5R4HM= +cloud.google.com/go/storage v1.47.0/go.mod h1:Ks0vP374w0PW6jOUameJbapbQKXqkjGd/OJRp2fb9IQ= +cloud.google.com/go/storagetransfer v1.11.2/go.mod h1:FcM29aY4EyZ3yVPmW5SxhqUdhjgPBUOFyy4rqiQbias= +cloud.google.com/go/talent v1.7.2/go.mod h1:k1sqlDgS9gbc0gMTRuRQpX6C6VB7bGUxSPcoTRWJod8= +cloud.google.com/go/texttospeech v1.10.0/go.mod h1:215FpCOyRxxrS7DSb2t7f4ylMz8dXsQg8+Vdup5IhP4= +cloud.google.com/go/tpu v1.7.2/go.mod h1:0Y7dUo2LIbDUx0yQ/vnLC6e18FK6NrDfAhYS9wZ/2vs= +cloud.google.com/go/trace v1.11.2 h1:4ZmaBdL8Ng/ajrgKqY5jfvzqMXbrDcBsUGXOT9aqTtI= +cloud.google.com/go/trace v1.11.2/go.mod h1:bn7OwXd4pd5rFuAnTrzBuoZ4ax2XQeG3qNgYmfCy0Io= cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs= -cloud.google.com/go/translate v1.10.7/go.mod h1:mH/+8tvcItuy1cOWqU+/Y3iFHgkVUObNIQYI/kiFFiY= -cloud.google.com/go/video v1.22.0/go.mod h1:CxPshUNAb1ucnzbtruEHlAal9XY+SPG2cFqC/woJzII= -cloud.google.com/go/videointelligence v1.11.11/go.mod h1:dab2Ca3AXT6vNJmt3/6ieuquYRckpsActDekLcsd6dU= -cloud.google.com/go/vision/v2 v2.8.6/go.mod h1:G3v0uovxCye3u369JfrHGY43H6u/IQ08x9dw5aVH8yY= -cloud.google.com/go/vmmigration v1.7.11/go.mod h1:PmD1fDB0TEHGQR1tDZt9GEXFB9mnKKalLcTVRJKzcQA= -cloud.google.com/go/vmwareengine v1.2.0/go.mod h1:rPjCHu6hG9N8d6PhkoDWFkqL9xpbFY+ueVW+0pNFbZg= -cloud.google.com/go/vpcaccess v1.7.11/go.mod h1:a2cuAiSCI4TVK0Dt6/dRjf22qQvfY+podxst2VvAkcI= -cloud.google.com/go/webrisk v1.9.11/go.mod h1:mK6M8KEO0ZI7VkrjCq3Tjzw4vYq+3c4DzlMUDVaiswE= -cloud.google.com/go/websecurityscanner v1.6.11/go.mod h1:vhAZjksELSg58EZfUQ1BMExD+hxqpn0G0DuyCZQjiTg= -cloud.google.com/go/workflows v1.12.10/go.mod h1:RcKqCiOmKs8wFUEf3EwWZPH5eHc7Oq0kamIyOUCk0IE= -code.cloudfoundry.org/bytefmt v0.1.0 h1:NmVhaUPBO59QQpt5vwYW8crDUksCnvTCQi+Q6uOeLwM= -code.cloudfoundry.org/bytefmt v0.1.0/go.mod h1:eF2ZbltNI7Pv+8Cuyeksu9up5FN5konuH0trDJBuscw= +cloud.google.com/go/translate v1.12.2/go.mod h1:jjLVf2SVH2uD+BNM40DYvRRKSsuyKxVvs3YjTW/XSWY= +cloud.google.com/go/video v1.23.2/go.mod h1:rNOr2pPHWeCbW0QsOwJRIe0ZiuwHpHtumK0xbiYB1Ew= +cloud.google.com/go/videointelligence v1.12.2/go.mod h1:8xKGlq0lNVyT8JgTkkCUCpyNJnYYEJVWGdqzv+UcwR8= +cloud.google.com/go/vision/v2 v2.9.2/go.mod h1:WuxjVQdAy4j4WZqY5Rr655EdAgi8B707Vdb5T8c90uo= +cloud.google.com/go/vmmigration v1.8.2/go.mod h1:FBejrsr8ZHmJb949BSOyr3D+/yCp9z9Hk0WtsTiHc1Q= +cloud.google.com/go/vmwareengine v1.3.2/go.mod h1:JsheEadzT0nfXOGkdnwtS1FhFAnj4g8qhi4rKeLi/AU= +cloud.google.com/go/vpcaccess v1.8.2/go.mod h1:4yvYKNjlNjvk/ffgZ0PuEhpzNJb8HybSM1otG2aDxnY= +cloud.google.com/go/webrisk v1.10.2/go.mod 
h1:c0ODT2+CuKCYjaeHO7b0ni4CUrJ95ScP5UFl9061Qq8= +cloud.google.com/go/websecurityscanner v1.7.2/go.mod h1:728wF9yz2VCErfBaACA5px2XSYHQgkK812NmHcUsDXA= +cloud.google.com/go/workflows v1.13.2/go.mod h1:l5Wj2Eibqba4BsADIRzPLaevLmIuYF2W+wfFBkRG3vU= +code.cloudfoundry.org/bytefmt v0.22.0 h1:gu5ebZR/n3BMeiLpjF1rb/NZcqD/1vwNBNWp1uWjz8Y= +code.cloudfoundry.org/bytefmt v0.22.0/go.mod h1:gVWU9Xk7D6PqXdpiCKxVv7X9OXqyeE38BugdzZMRaNg= dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= dmitri.shuralyov.com/gpu/mtl v0.0.0-20221208032759-85de2813cf6b/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= eliasnaur.com/font v0.0.0-20230308162249-dd43949cb42d/go.mod h1:OYVuxibdk9OSLX8vAqydtRPP87PyTFcT9uH3MlEGBQA= @@ -183,19 +201,29 @@ gioui.org/x v0.2.0/go.mod h1:rCGN2nZ8ZHqrtseJoQxCMZpt2xrZUrdZ2WuMRLBJmYs= git.sr.ht/~jackmordaunt/go-toast v1.0.0/go.mod h1:aIuRX/HdBOz7yRS8rOVYQCwJQlFS7DbYBTpUV0SHeeg= git.sr.ht/~sbinet/cmpimg v0.1.0 h1:E0zPRk2muWuCqSKSVZIWsgtU9pjsw3eKHi8VmQeScxo= git.sr.ht/~sbinet/cmpimg v0.1.0/go.mod h1:FU12psLbF4TfNXkKH2ZZQ29crIqoiqTZmeQ7dkp/pxE= -git.sr.ht/~sbinet/gg v0.5.0 h1:6V43j30HM623V329xA9Ntq+WJrMjDxRjuAB1LFWF5m8= -git.sr.ht/~sbinet/gg v0.5.0/go.mod h1:G2C0eRESqlKhS7ErsNey6HHrqU1PwsnCQlekFi9Q2Oo= +git.sr.ht/~sbinet/gg v0.6.0 h1:RIzgkizAk+9r7uPzf/VfbJHBMKUr0F5hRFxTUGMnt38= +git.sr.ht/~sbinet/gg v0.6.0/go.mod h1:uucygbfC9wVPQIfrmwM2et0imr8L7KQWywX0xpFMm94= git.wow.st/gmp/jni v0.0.0-20210610011705-34026c7e22d0/go.mod h1:+axXBRUTIDlCeE73IKeD/os7LoEnTKdkp8/gQOFjqyo= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= -github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0= +github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v1.4.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= github.com/GoogleCloudPlatform/grpc-gcp-go/grpcgcp v1.5.0/go.mod h1:dppbR7CwXD4pgtV9t3wD1812RaLDcBjtblcDF5f1vI0= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.0/go.mod h1:p2puVVSKjQ84Qb1gzw2XHLs34WQyHTYFZLaVxypAFYs= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.1/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2 h1:cZpsGsWTIFKymTA0je7IIvi1O7Es7apb9CF3EQlOcfE= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.24.2/go.mod h1:itPGVDKf9cC/ov4MdvJ2QZ0khw4bfoo9jzwTJlaxy2k= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1 h1:UQ0AhxogsIRZDkElkblfnwjc3IaltCm2HUMvezQaL7s= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.48.1/go.mod h1:jyqM3eLpJ3IbIFDTKVz2rF9T/xWGW0rIriGwnz8l9Tk= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1 h1:oTX4vsorBZo/Zdum6OKPA4o7544hm6smoRv1QjpTwGo= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.48.1/go.mod h1:0wEl7vrAD8mehJyohS9HZy+WyEOaQO2mJx86Cvh93kM= 
+github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1 h1:8nn+rsCvTq9axyEh382S0PFLBeaFwNsT43IrPWzctRU= +github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.48.1/go.mod h1:viRWSEhtMZqz1rhwmOVKkWl6SwmVowfL9O2YR5gI2PE= github.com/JohnCGriffin/overflow v0.0.0-20211019200055-46fa312c352c/go.mod h1:X0CRv0ky0k6m906ixxpzmDRLvX58TFUKS2eePweuyxk= github.com/Kodeworks/golang-image-ico v0.0.0-20141118225523-73f0f4cfade9/go.mod h1:7uhhqiBaR4CpN0k9rMjOtjpcfGd6DG2m04zQxKnWQ0I= -github.com/ajstarks/deck v0.0.0-20240814155529-0478e0c25be8/go.mod h1:5o5HzZ3nUiOivE0SPQepE7oNquDd+9yip0PtlFpq888= -github.com/ajstarks/deck/generate v0.0.0-20240814155529-0478e0c25be8/go.mod h1:al/X+Mdfx3esXeGnnIVn5aYB6SfwTu+9T0u4EXmKJuk= +github.com/ajstarks/deck v0.0.0-20240918141114-8d365813662d/go.mod h1:5o5HzZ3nUiOivE0SPQepE7oNquDd+9yip0PtlFpq888= +github.com/ajstarks/deck/generate v0.0.0-20240918141114-8d365813662d/go.mod h1:al/X+Mdfx3esXeGnnIVn5aYB6SfwTu+9T0u4EXmKJuk= github.com/ajstarks/fc v0.0.0-20230606144319-ef5d5cb73a3d/go.mod h1:Qp3TfzbBiIjHwDxIpu+g9nYfNw+xXF2Yqp4WmMlTtwM= github.com/ajstarks/openvg v0.0.0-20191008131700-c6885d824eb8/go.mod h1:jpZHIkd4sQEgrzshrUQrRfv5OUMMq0w/Q1yK6ZYhUlk= github.com/ajstarks/svgo v0.0.0-20211024235047-1546f124cd8b h1:slYM766cy2nI3BwyRiyQj/Ud48djTMtMebDqepE95rw= @@ -218,49 +246,52 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPd github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU= github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU= -github.com/aws/aws-sdk-go-v2 v1.30.4 h1:frhcagrVNrzmT95RJImMHgabt99vkXGslubDaDagTk8= -github.com/aws/aws-sdk-go-v2 v1.30.4/go.mod h1:CT+ZPWXbYrci8chcARI3OmI/qgd+f6WtuLOoaIA8PR0= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4 h1:70PVAiL15/aBMh5LThwgXdSQorVr91L127ttckI9QQU= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.4/go.mod h1:/MQxMqci8tlqDH+pjmoLu1i0tbWCUP1hhyMRuFxpQCw= -github.com/aws/aws-sdk-go-v2/config v1.27.28 h1:OTxWGW/91C61QlneCtnD62NLb4W616/NM1jA8LhJqbg= -github.com/aws/aws-sdk-go-v2/config v1.27.28/go.mod h1:uzVRVtJSU5EFv6Fu82AoVFKozJi2ZCY6WRCXj06rbvs= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28 h1:m8+AHY/ND8CMHJnPoH7PJIRakWGa4gbfbxuY9TGTUXM= -github.com/aws/aws-sdk-go-v2/credentials v1.17.28/go.mod h1:6TF7dSc78ehD1SL6KpRIPKMA1GyyWflIkjqg+qmf4+c= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12 h1:yjwoSyDZF8Jth+mUk5lSPJCkMC0lMy6FaCD51jm6ayE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.12/go.mod h1:fuR57fAgMk7ot3WcNQfb6rSEn+SUffl7ri+aa8uKysI= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11 h1:FEDZD/Axt5tKSkPAs967KZ++MkvYdBqr0a+cetRbjLM= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.11/go.mod h1:dvlsbA32KfvCzqwTiX7maABgFek2RyUuYEJ3kyn/PmQ= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16 h1:TNyt/+X43KJ9IJJMjKfa3bNTiZbUP7DeCxfbTROESwY= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.16/go.mod h1:2DwJF39FlNAUiX5pAc0UNeiz16lK2t7IaFcm0LFHEgc= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16 h1:jYfy8UPmd+6kJW5YhY0L1/KftReOGxI/4NtVSTh9O/I= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.16/go.mod h1:7ZfEPZxkW42Afq4uQB8H2E2e6ebh6mXTueEpYzjCzcs= +github.com/aws/aws-sdk-go-v2 v1.32.6 h1:7BokKRgRPuGmKkFMhEg/jSul+tB9VvXhcViILtfG8b4= +github.com/aws/aws-sdk-go-v2 
v1.32.6/go.mod h1:P5WJBrYqqbWVaOxgH0X/FYYD47/nooaPOZPlQdmiN2U= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7 h1:lL7IfaFzngfx0ZwUGOZdsFFnQ5uLvR0hWqqhyE7Q9M8= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.7/go.mod h1:QraP0UcVlQJsmHfioCrveWOC1nbiWUl3ej08h4mXWoc= +github.com/aws/aws-sdk-go-v2/config v1.28.6 h1:D89IKtGrs/I3QXOLNTH93NJYtDhm8SYa9Q5CsPShmyo= +github.com/aws/aws-sdk-go-v2/config v1.28.6/go.mod h1:GDzxJ5wyyFSCoLkS+UhGB0dArhb9mI+Co4dHtoTxbko= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47 h1:48bA+3/fCdi2yAwVt+3COvmatZ6jUDNkDTIsqDiMUdw= +github.com/aws/aws-sdk-go-v2/credentials v1.17.47/go.mod h1:+KdckOejLW3Ks3b0E3b5rHsr2f9yuORBum0WPnE5o5w= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21 h1:AmoU1pziydclFT/xRV+xXE/Vb8fttJCLRPv8oAkprc0= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.21/go.mod h1:AjUdLYe4Tgs6kpH4Bv7uMZo7pottoyHMn4eTcIcneaY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43 h1:iLdpkYZ4cXIQMO7ud+cqMWR1xK5ESbt1rvN77tRi1BY= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.17.43/go.mod h1:OgbsKPAswXDd5kxnR4vZov69p3oYjbvUyIRBAAV0y9o= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25 h1:s/fF4+yDQDoElYhfIVvSNyeCydfbuTKzhxSXDXCPasU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.25/go.mod h1:IgPfDv5jqFIzQSNbUEMoitNooSMXjRSDkhXv8jiROvU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25 h1:ZntTCl5EsYnhN/IygQEUugpdwbhdkom9uHcbCftiGgA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.25/go.mod h1:DBdPrgeocww+CSl1C8cEV8PN1mHMBhuCDLpXezyvWkE= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1 h1:VaRN3TlFdd6KxX1x3ILT5ynH6HvKgqdiXoTxAF4HQcQ= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.1/go.mod h1:FbtygfRFze9usAadmnGJNc8KsP346kEe+y2/oyhGAGc= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15 h1:Z5r7SycxmSllHYmaAZPpmN8GviDrSGhMS6bldqtXZPw= github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.15/go.mod h1:CetW7bDE00QoGEmPUoZuRog07SGVAUVW6LFpNP0YfIg= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4 h1:KypMCbLPPHEmf9DgMGw51jMj77VfGPAN2Kv4cfhlfgI= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.4/go.mod h1:Vz1JQXliGcQktFTN/LN6uGppAIRoLBR2bMvIMP0gOjc= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18 h1:GckUnpm4EJOAio1c8o25a+b3lVfwVzC9gnSBqiiNmZM= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.3.18/go.mod h1:Br6+bxfG33Dk3ynmkhsW2Z/t9D4+lRqdLDNCKi85w0U= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18 h1:tJ5RnkHCiSH0jyd6gROjlJtNwov0eGYNz8s8nFcR0jQ= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.18/go.mod h1:++NHzT+nAF7ZPrHPsA+ENvsXkOO8wEu+C6RXltAG4/c= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16 h1:jg16PhLPUiHIj8zYIW6bqzeQSuHVEiWnGA0Brz5Xv2I= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.17.16/go.mod h1:Uyk1zE1VVdsHSU7096h/rwnXDzOzYQVl+FNPhPw7ShY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0 h1:Cso4Ev/XauMVsbwdhYEoxg8rxZWw43CFqqaPB5w3W2c= -github.com/aws/aws-sdk-go-v2/service/s3 v1.59.0/go.mod h1:BSPI0EfnYUuNHPS0uqIo5VrRwzie+Fp+YhQOUs16sKI= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5 h1:zCsFCKvbj25i7p1u94imVoO447I/sFv8qq+lGJhRN0c= -github.com/aws/aws-sdk-go-v2/service/sso v1.22.5/go.mod h1:ZeDX1SnKsVlejeuz41GiajjZpRSWR7/42q/EyA/QEiM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1 h1:iXtILhvDxB6kPvEXgsDhGaZCSC6LQET5ZHSdJozeI0Y= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.1/go.mod 
h1:9nu0fVANtYiAePIBh2/pFUSwtJ402hLnp854CNoDOeE= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6 h1:HCpPsWqmYQieU7SS6E9HXfdAMSud0pteVXieJmcpIRI= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.4.6/go.mod h1:ngUiVRCco++u+soRRVBIvBZxSMMvOVMXA4PJ36JLfSw= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6 h1:50+XsN70RS7dwJ2CkVNXzj7U2L1HKP8nqTd3XWEXBN4= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.6/go.mod h1:WqgLmwY7so32kG01zD8CPTJWVWM+TzJoOVHwTg4aPug= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6 h1:BbGDtTi0T1DYlmjBiCr/le3wzhA37O8QTC5/Ab8+EXk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.6/go.mod h1:hLMJt7Q8ePgViKupeymbqI0la+t9/iYFBjxQCFwuAwI= +github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0 h1:nyuzXooUNJexRT0Oy0UQY6AhOzxPxhtt4DcBIHyCnmw= +github.com/aws/aws-sdk-go-v2/service/s3 v1.71.0/go.mod h1:sT/iQz8JK3u/5gZkT+Hmr7GzVZehUMkRZpOaAwYXeGY= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7 h1:rLnYAfXQ3YAccocshIH5mzNNwZBkBo+bP6EhIxak6Hw= +github.com/aws/aws-sdk-go-v2/service/sso v1.24.7/go.mod h1:ZHtuQJ6t9A/+YDuxOLnbryAmITtr8UysSny3qcyvJTc= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE= github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4 h1:iAckBT2OeEK/kBDyN/jDtpEExhjeeA/Im2q4X0rJZT8= -github.com/aws/aws-sdk-go-v2/service/sts v1.30.4/go.mod h1:vmSqFK+BVIwVpDAGZB3CoCXHzurt4qBE8lf+I/kRTh0= -github.com/aws/smithy-go v1.20.4 h1:2HK1zBdPgRbjFOHlfeQZfpC4r72MOb9bZkiFwggKO+4= -github.com/aws/smithy-go v1.20.4/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2 h1:s4074ZO1Hk8qv65GqNXqDjmkf4HSQqJukaLuuW0TpDA= +github.com/aws/aws-sdk-go-v2/service/sts v1.33.2/go.mod h1:mVggCnIWoM09jP71Wh+ea7+5gAp53q+49wDFs1SW5z8= +github.com/aws/smithy-go v1.22.1 h1:/HPHZQ0g7f4eUeK6HKglFz8uwVfZKgoI25rb/J+dnro= +github.com/aws/smithy-go v1.22.1/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg= +github.com/bazelbuild/rules_go v0.49.0/go.mod h1:Dhcz716Kqg1RHNWos+N6MlXNkjNP2EwZQ0LukRKJfMs= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/boombuler/barcode v1.0.2/go.mod h1:paBWMcWSl3LHKBqUq+rly7CNSldXjb2rDl3JlRe0mD8= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= @@ -275,11 +306,11 @@ github.com/chzyer/logex v1.2.1/go.mod h1:JLbx6lG2kDbNRFnfkgvh4eRJRPX1QCoOIWomwys github.com/chzyer/readline v1.5.1/go.mod h1:Eh+b79XXUwfKfcPLepksvw2tcLE/Ct21YObkaSkeBlk= github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8= github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go 
v0.0.0-20240723142845-024c85f92f20 h1:N+3sFI5GUjRKBi+i0TxYVST9h4Ie192jJWpHvthBBgg= -github.com/cncf/xds/go v0.0.0-20240723142845-024c85f92f20/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/creack/pty v1.1.23 h1:4M6+isWdcStXEf15G/RbrMPOQj1dZ7HPZCGwE4kOeP0= -github.com/creack/pty v1.1.23/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= +github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57 h1:put7Je9ZyxbHtwr7IqGrW4LLVUupJQ2gbsDshKISSgU= +github.com/cncf/xds/go v0.0.0-20241213214725-57cfbe6fad57/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= +github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s= +github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/denisenkom/go-mssqldb v0.12.3 h1:pBSGx9Tq67pBOTLmxNuirNTeB8Vjmf886Kx+8Y+8shw= @@ -291,8 +322,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3 github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/emicklei/go-restful/v3 v3.12.1 h1:PJMDIM/ak7btuL8Ex0iYET9hxM3CI2sjZtzpL63nKAU= github.com/emicklei/go-restful/v3 v3.12.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/envoyproxy/go-control-plane v0.13.0 h1:HzkeUz1Knt+3bK+8LG1bxOO/jzWZmdxpwC51i202les= -github.com/envoyproxy/go-control-plane v0.13.0/go.mod h1:GRaKG3dwvFoTg4nj7aXdZnvMg4d7nvT/wl9WgVXn3Q8= +github.com/envoyproxy/go-control-plane v0.13.1 h1:vPfJZCkob6yTMEgS+0TwfTUfbHjfy/6vOJ8hUWX/uXE= +github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw= github.com/envoyproxy/protoc-gen-validate v1.1.0 h1:tntQDh69XqOCOZsDz0lVJQez/2L6Uu2PdjCQwWCJ3bM= github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4= github.com/esiqveland/notify v0.11.0/go.mod h1:63UbVSaeJwF0LVJARHFuPgUAoM7o1BEvCZyknsuonBc= @@ -304,16 +335,18 @@ github.com/evanphx/json-patch/v5 v5.9.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq github.com/fatih/color v1.10.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= -github.com/felixge/fgprof v0.9.4 h1:ocDNwMFlnA0NU0zSB3I52xkO4sFXk80VK9lXjLClu88= -github.com/felixge/fgprof v0.9.4/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= +github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= +github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/fogleman/gg v1.3.0/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fredbi/uri v0.1.0/go.mod h1:1xC40RnIOGCaQzswaOvrzvG/3M3F0hyDVb3aO/1iGy0= -github.com/fsnotify/fsnotify v1.7.0 
h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= +github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= +github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/fyne-io/gl-js v0.0.0-20220119005834-d2da28d9ccfe/go.mod h1:d4clgH0/GrRwWjRzJJQXxT/h1TyuNSfF/X64zb/3Ggg= github.com/fyne-io/glfw-js v0.0.0-20220120001248-ee7290d23504/go.mod h1:gLRWYfYnMA9TONeppRSikMdXlHQ97xVsPojddUv3b/E= github.com/fyne-io/image v0.0.0-20220602074514-4956b0afb3d2/go.mod h1:eO7W361vmlPOrykIg+Rsh1SZ3tQBaOsfzZhsIOb/Lm0= @@ -350,12 +383,11 @@ github.com/go-pdf/fpdf v1.4.3/go.mod h1:HzcnA+A23uwogo0tp9yU+l3V+KXhiESpt1PMayhO github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= -github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-playground/validator/v10 v10.23.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI= github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= -github.com/go-task/slim-sprig v2.20.0+incompatible h1:4Xh3bDzO29j4TWNOI+24ubc0vbVFMg2PMnXKxK54/CA= github.com/go-task/slim-sprig v2.20.0+incompatible/go.mod h1:N/mhXZITr/EQAOErEHciKvO1bFei2Lld2Ym6h96pdy0= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= @@ -374,10 +406,10 @@ github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA= github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= github.com/goccy/go-yaml v1.9.8/go.mod h1:JubOolP3gh0HpiBc4BLRD4YmjEjHAmIIB2aaXKkTfoE= github.com/goccy/go-yaml v1.11.0/go.mod h1:H+mJrWtjPTJAHvRbV09MCK9xYwODM+wRTVFFTWckfng= -github.com/gocql/gocql v1.6.0 h1:IdFdOTbnpbd0pDhl4REKQDM+Q0SzKXQ1Yh+YZZ8T/qU= -github.com/gocql/gocql v1.6.0/go.mod h1:3gM2c4D3AnkISwBxGnMMsS8Oy4y2lhbPRsH4xnJrHG8= -github.com/gocraft/dbr/v2 v2.7.6 h1:ASHKFgCbTLODbb9f756Cl8VAlnvQLKqIzx9E1Cfb7eo= -github.com/gocraft/dbr/v2 v2.7.6/go.mod h1:8IH98S8M8J0JSEiYk0MPH26ZDUKemiQ/GvmXL5jo+Uw= +github.com/gocql/gocql v1.7.0 h1:O+7U7/1gSN7QTEAaMEsJc1Oq2QHXvCWoF3DFK9HDHus= +github.com/gocql/gocql v1.7.0/go.mod h1:vnlvXyFZeLBF0Wy+RS8hrOdbn0UWsWtdg07XJnFxZ+4= +github.com/gocraft/dbr/v2 v2.7.7 h1:GyG0GvBnCXoNuZqgwJikN/FKPMflxnqb6dJHCwXseG0= +github.com/gocraft/dbr/v2 v2.7.7/go.mod h1:rW3YUVRncA5eL464O20jfJq2W2kCML5dNIBqGVD04gM= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= @@ -389,22 +421,23 @@ github.com/golang-sql/sqlexp v0.1.0 
h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0 h1:DACJavvAHhabrF08vX0COfcOBJRhZ8lUbR+ZWIs0Y5g= github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k= -github.com/golang/glog v1.2.2/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ= +github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg= +github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/flatbuffers v23.5.26+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-pkcs11 v0.2.1-0.20230907215043-c6f79328ddf9/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= +github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY= github.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo= github.com/google/go-replayers/grpcreplay v1.3.0/go.mod h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI= github.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk= @@ -413,8 +446,8 @@ github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8 h1:FKHo8hFI3A+7w0aUQuYXQ+6EN5stWmeY/AZqtM8xk9k= -github.com/google/pprof v0.0.0-20240727154555-813a5fbdbec8/go.mod h1:K1liHPHnj73Fdn/EKuT8nrFqBihUSKXoLYU0BuatOYo= +github.com/google/pprof 
v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= +github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/s2a-go v0.1.3/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.4/go.mod h1:Ej+mSEMGRnqRzjc7VtF+jdBwYG5fuJfiZ8ELkjEwM0A= github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= @@ -434,10 +467,12 @@ github.com/googleapis/enterprise-certificate-proxy v0.2.0/go.mod h1:8C0jb7/mgJe/ github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.2.4/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/enterprise-certificate-proxy v0.3.1/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s= -github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A= +github.com/googleapis/enterprise-certificate-proxy v0.3.3/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw= +github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA= +github.com/googleapis/gax-go/v2 v2.14.0 h1:f+jMrjBPl+DL9nI4IQzLUxMq7XrAqFYB7hBPqMNIe8o= +github.com/googleapis/gax-go/v2 v2.14.0/go.mod h1:lhBCnjdLrWRaPvLWhmc8IS24m9mr07qSYnHncrgo+zk= github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20211219123610-ec9572f70e60/go.mod h1:cz9oNYuRUWGdHmLF2IodMLkAhcPtXeULvcBNagUrxTI= github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= @@ -451,21 +486,18 @@ github.com/grafana/pyroscope-go/godeltaprof v0.1.8 h1:iwOtYXeeVSAeYefJNaxDytgjKt github.com/grafana/pyroscope-go/godeltaprof v0.1.8/go.mod h1:2+l7K7twW49Ct4wFluZD3tZ6e0SjanjcUUBPVD/UuGU= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0 h1:TmHmbvxPmaegwhDubVz0lICL0J5Ka2vwTzhoePEXsGE= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.24.0/go.mod h1:qztMSjm835F2bXf+5HKAPIS5qsmQDqZna/PgVt4rWtI= github.com/hamba/avro/v2 v2.17.2/go.mod h1:Q9YK+qxAhtVrNqOhwlZTATLgLA8qxG2vtvkhK8fJ7Jo= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/ianlancetaylor/demangle 
v0.0.0-20240805132620-81f5be970eca/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/ianlancetaylor/demangle v0.0.0-20240912202439-0a2b6291aafd/go.mod h1:gx7rwoVhcfuVKG5uya9Hs3Sxj7EIvldVofAWIUtGouw= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jackmordaunt/icns v0.0.0-20181231085925-4f16af745526/go.mod h1:UQkeMHVoNcyXYq9otUupF7/h/2tmHlhrS2zw7ZVvUqc= github.com/jackmordaunt/icns/v2 v2.2.1/go.mod h1:6aYIB9eSzyfHHMKqDf17Xrs1zetQPReAkiUSHzdw4cI= -github.com/jessevdk/go-flags v1.6.1/go.mod h1:Mk8T1hIAWpOiJiHa9rJASDK2UGWji0EuPGBnNLMooyc= github.com/jezek/xgb v1.0.0/go.mod h1:nrhwO0FX/enq75I7Y7G8iN1ubpSGZEiA3v9e9GyRFlk= github.com/jezek/xgb v1.1.1/go.mod h1:nrhwO0FX/enq75I7Y7G8iN1ubpSGZEiA3v9e9GyRFlk= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= @@ -484,19 +516,19 @@ github.com/jsummers/gobmp v0.0.0-20151104160322-e2ba15ffa76e/go.mod h1:kLgvv7o6U github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jung-kurt/gofpdf v1.16.2/go.mod h1:1hl7y57EsiPAkLbOwzpzqgx1A30nQCk/YmFV8S2vmK0= github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8= -github.com/kisielk/errcheck v1.7.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= +github.com/kisielk/errcheck v1.8.0/go.mod h1:1kLL+jV4e+CFfueBmI1dSK2ADDyQnlrnrY/FqKluHJQ= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= -github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b h1:4A/j6hb0Sd3VXqhNtgmUlcPy353Qaa0aIfAPcBrI1n8= -github.com/klauspost/compress v1.17.10-0.20240812095115-3868468e621b/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= -github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM= -github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= +github.com/klauspost/compress v1.17.12-0.20241216125714-bbaf27d0c3d9 h1:ahrb5AWJdyIosL02FO6NcIT+Dkf32flw6TsZd2IcFCc= +github.com/klauspost/compress v1.17.12-0.20241216125714-bbaf27d0c3d9/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= +github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY= +github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8= github.com/kpango/fastime v1.1.9 h1:xVQHcqyPt5M69DyFH7g1EPRns1YQNap9d5eLhl/Jy84= github.com/kpango/fastime v1.1.9/go.mod h1:vyD7FnUn08zxY4b/QFBZVG+9EWMYsNl+QF0uE46urD4= github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1 h1:rxyM+7uaZQ35P9fbixdnld/h4AgEhODoubuy6A4nDdk= github.com/kpango/fuid v0.0.0-20221203053508-503b5ad89aa1/go.mod h1:CAYeq6us9NfnRkSz67/xKVIR6/vaY5ZQZRe6IVcaIKg= -github.com/kpango/gache/v2 v2.0.9 h1:iov+hbPaKXDVjAFxmJ52Oso2VZyhcMiUkNlkq+OwylM= -github.com/kpango/gache/v2 v2.0.9/go.mod h1:5AWVWlHau1dwI9Hzf+NZc4rPTwxM3SVwJQgob/OyAjQ= +github.com/kpango/gache/v2 v2.1.1 h1:nOuVy7saIbs+tMtOyvPIf71Be2lUL88ymV7SQoICOkw= +github.com/kpango/gache/v2 v2.1.1/go.mod h1:c5WoO35SM5xq4x8K+QkTwh5xsjokfL3yKsLUUtDll+c= github.com/kpango/glg v1.6.15 
h1:nw0xSxpSyrDIWHeb3dvnE08PW+SCbK+aYFETT75IeLA= github.com/kpango/glg v1.6.15/go.mod h1:cmsc7Yeu8AS3wHLmN7bhwENXOpxfq+QoqxCIk2FneRk= github.com/kpango/go-hostpool v0.0.0-20210303030322-aab80263dcd0 h1:orIEVdc68woWO1ZyYWEVOl5Kl33eDjP+kbxgbdpMwi4= @@ -522,13 +554,13 @@ github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i github.com/lucor/goinfo v0.0.0-20200401173949-526b5363a13a/go.mod h1:ORP3/rB5IsulLEBwQZCJyyV6niqmI7P4EWSmkug+1Ng= github.com/lucor/goinfo v0.0.0-20210802170112-c078a2b0f08b/go.mod h1:PRq09yoB+Q2OJReAmwzKivcYyremnibWGbK7WfftHzc= github.com/lyft/protoc-gen-star/v2 v2.0.4-0.20230330145011-496ad1ac90a4/go.mod h1:amey7yeodaJhXSbf/TlLvWiqQfLOSpEk//mLlc+axEk= -github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= -github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4= +github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU= github.com/mandolyte/mdtopdf v1.3.2/go.mod h1:c28Ldk+tVc/y7QQcEcILStS/OFlerdXGGdBUzJQBgEo= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-sqlite3 v1.14.22 h1:2gZY6PC6kBnID23Tichd1K+Z0oS6nE/XwU+Vz/5o4kU= -github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/mattn/go-sqlite3 v1.14.24 h1:tpSp2G2KyMnnQu99ngJ47EIkWVmliIizyZBfPrBWDRM= +github.com/mattn/go-sqlite3 v1.14.24/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo= github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= @@ -536,8 +568,8 @@ github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db/go.mod h1:l0 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587 h1:HfkjXDfhgVaN5rmueG8cL8KKeFNecRCXFhaJ2qZ5SKA= -github.com/moby/term v0.0.0-20221205130635-1aeaba878587/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= @@ -556,10 +588,10 @@ github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY= github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.20.0 h1:PE84V2mHqoT1sglvHc8ZdQtPcwmvvt29WLEEO3xmdZw= 
-github.com/onsi/ginkgo/v2 v2.20.0/go.mod h1:lG9ey2Z29hR41WMVthyJBGUBcBhGOtoPF2VFMvBXFCI= -github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= -github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= +github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg= +github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw= +github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/phpdave11/gofpdf v1.4.2/go.mod h1:zpO6xFn9yxo3YLyMvW8HcKWVdbNqgIfOOp2dXMnm1mY= @@ -570,19 +602,19 @@ github.com/pierrec/lz4/v3 v3.3.5/go.mod h1:280XNCGS8jAcG++AHdd6SeWnzyJ1w9oow2vbO github.com/pierrec/lz4/v4 v4.1.18/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/sftp v1.13.6/go.mod h1:tz1ryNURKu77RL+GuCzmoJYxQczL3wLNNpPWagdg4Qk= +github.com/pkg/sftp v1.13.7/go.mod h1:KMKI0t3T6hfA+lTR/ssZdunHo+uwq7ghoN09/FSu3DY= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo= github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/client_golang v1.20.0 h1:jBzTZ7B099Rg24tny+qngoynol8LtVYlA2bqx3vEloI= -github.com/prometheus/client_golang v1.20.0/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= +github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y= +github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE= github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc= -github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8= +github.com/prometheus/common v0.61.0 h1:3gv/GThfX0cV2lpO7gkTUwZru38mxevy90Bj8YFSRQQ= +github.com/prometheus/common v0.61.0/go.mod h1:zr29OCN/2BsJRaFwG8QOBr41D6kkchKbpeNH7pAjb/s= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/quasilyte/go-ruleguard v0.4.2 h1:htXcXDK6/rO12kiTHKfHuqR4kr3Y4M0J0rOL6CH/BYs= @@ -593,11 +625,13 @@ github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOA github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng= github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 h1:M8mH9eK4OUR4lu7Gd+PU1fV2/qnDNfzT635KRSObncs= github.com/quasilyte/stdinfo 
v0.0.0-20220114132959-f7386bf02567/go.mod h1:DWNGW8A4Y+GyBgPuaQJuWiy0XYftx4Xm/y5Jqk9I6VQ= +github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE= +github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs= github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= -github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/ruudk/golang-pdf417 v0.0.0-20201230142125-a7e3863a1245/go.mod h1:pQAZKsJ8yyVxGRWYNEm9oFB8ieLgKFnamEyDmSA0BRk= github.com/schollz/progressbar/v2 v2.15.0/go.mod h1:UdPq3prGkfQ7MOzZKlDRpYKcFqEMczbD7YmbPgpzKMI= @@ -627,15 +661,19 @@ github.com/srwiley/rasterx v0.0.0-20220128185129-2efea2b9ea41/go.mod h1:nXTWP6+g github.com/stoewer/go-strcase v1.3.0/go.mod h1:fAH5hQ5pehh+j3nZfvwdk2RgEgQjAoM8wodgtPmh1xo= github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/substrait-io/substrait-go v0.4.2/go.mod h1:qhpnLmrcvAnlZsUyPXZRqldiHapPTXC3t7xFgDi3aQg= github.com/tevino/abool v1.2.0/go.mod h1:qc66Pna1RiIsPa7O4Egxxs9OqkuxDX55zznh9K07Tzg= github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk= github.com/tidwall/match v1.1.1/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28= +github.com/unum-cloud/usearch/golang v0.0.0-20241213232841-3964f8392443 h1:nhcDBsog6WxdpMd7nO+4kSwD2zW63PolAmBMpyGEoxU= +github.com/unum-cloud/usearch/golang v0.0.0-20241213232841-3964f8392443/go.mod h1:NxBpQibuBBeA/V8RGbrNzVAv4OyWWL5yNao7mVz656k= github.com/urfave/cli/v2 v2.4.0/go.mod h1:NX9W0zmTvedE5oDoOMs2RTC8RvdK98NTYZE5LbaEYPg= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/xlab/treeprint v1.2.0 h1:HzHnuAF1plUN2zGlAFHbSQP2qJ0ZAD3XF5XD7OesXRQ= github.com/xlab/treeprint v1.2.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= @@ -643,109 +681,122 @@ github.com/zeebo/assert v1.3.1 h1:vukIABvugfNMZMQO1ABsyQDJDTVQbn+LWSMy1ol1h6A= github.com/zeebo/assert v1.3.1/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 
h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= -go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= -go.etcd.io/bbolt v1.3.10 h1:+BqfJTcCzTItrop8mq/lbzL8wSGtj94UO/3U31shqG0= -go.etcd.io/bbolt v1.3.10/go.mod h1:bK3UQLPJZly7IlNmV7uVHJDxfe5aK9Ll93e/74Y9oEQ= +go.einride.tech/aip v0.68.0/go.mod h1:7y9FF8VtPWqpxuAxl0KQWqaULxW4zFIesD6zF5RIHHg= +go.etcd.io/bbolt v1.3.11 h1:yGEzV1wPz2yVCLsD8ZAiGHhHVlczyC9d1rP43/VCRJ0= +go.etcd.io/bbolt v1.3.11/go.mod h1:dksAq7YMXoljX0xu6VF5DMZGbhYYoLUalEiSySYAS4I= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0 h1:9G6E0TXzGFVfTnawRzrPl83iHOAV7L8NJiR8RSGYV1g= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.53.0/go.mod h1:azvtTADFQJA8mX80jIH/akaE7h+dbm/sVuaHqN13w74= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/detectors/gcp v1.28.0/go.mod h1:9BIqH22qyHWAiZxQh0whuJygro59z+nbMVuc7ciiGug= +go.opentelemetry.io/contrib/detectors/gcp v1.29.0/go.mod h1:GW2aWZNwR2ZxDLdv8OyC2G8zkRoQBuURgV7RPQgcPoU= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0 h1:G1JQOreVrfhRkner+l4mrGxmfqYCAuy76asTDAo0xsA= +go.opentelemetry.io/contrib/detectors/gcp v1.31.0/go.mod h1:tzQL6E1l+iV44YFTkcAeNQqzXUiekSYP9jjJjXwEd00= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA= go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg= -go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= -go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= -go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= -go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= -go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= 
-go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= -go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= -go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= -go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= -go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= -go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= -go.starlark.net v0.0.0-20240725214946-42030a7cedce h1:YyGqCjZtGZJ+mRPaenEiB87afEO2MFRzLiJNZ0Z0bPw= -go.starlark.net v0.0.0-20240725214946-42030a7cedce/go.mod h1:YKMCv9b1WrfWmeqdV5MAuEHWsu5iC+fe6kYl2sQjdI8= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0 h1:TT4fX+nBOA/+LUkobKGW1ydGcn+G3vRw9+g5HwCphpk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.54.0/go.mod h1:L7UH0GbB0p47T4Rri3uHjbpCFYrVrwc1I25QhNPiGK8= +go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw= +go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0 h1:7F29RDmnlqk6B5d+sUqemt8TBfDqxryYW5gX6L74RFA= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.33.0/go.mod h1:ZiGDq7xwDMKmWDrN1XsXAj0iC7hns+2DhxBFSncNHSE= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0 h1:Vh5HayB/0HHfOQA7Ctx69E/Y/DcQSMPpKANYVMQ7fBA= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.33.0/go.mod h1:cpgtDBaqD/6ok/UG0jT15/uKjAY8mRA53diogHBg3UI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0 h1:5pojmb1U1AogINhN3SurB+zm/nIcusopeBNp42f45QM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.33.0/go.mod h1:57gTHJSE5S1tqg+EKsLPlTWhpHMsWlVmer+LA926XiA= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I= +go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ= +go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M= +go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM= +go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM= +go.opentelemetry.io/otel/sdk/metric v1.33.0 h1:Gs5VK9/WUJhNXZgn8MR6ITatvAmKeIuCtNbsP3JkNqU= +go.opentelemetry.io/otel/sdk/metric v1.33.0/go.mod h1:dL5ykHZmm1B1nVRk9dDjChwDmt81MjVp3gLkQRwKf/Q= +go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s= +go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck= +go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg= +go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= -go.uber.org/automaxprocs v1.5.3 h1:kWazyxZUrS3Gs4qUpbwo5kEIMGe/DAvi5Z4tl2NW4j8= -go.uber.org/automaxprocs v1.5.3/go.mod h1:eRbA25aqJrxAbsLO0xy5jVwPt7FQnRgjW+efnwa1WM0= +go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= +go.uber.org/automaxprocs v1.6.0/go.mod h1:ifeIMSnPZuznNm6jmdzmU3/bfk01Fe2fotchwEFJ8r8= 
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= +go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/ratelimit v0.3.1 h1:K4qVE+byfv/B3tC+4nYWP7v/6SimcO7HzHekoMNBma0= go.uber.org/ratelimit v0.3.1/go.mod h1:6euWsTB6U/Nb3X++xEUXA8ciPJvr19Q/0h1+oDcJhRk= go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8= go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E= -gocloud.dev v0.39.0 h1:EYABYGhAalPUaMrbSKOr5lejxoxvXj99nE8XFtsDgds= -gocloud.dev v0.39.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa h1:ELnwvuAXPNtPk1TJRuGkI9fDTwym6AYBu0qzT8AcHdI= -golang.org/x/exp v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ= +gocloud.dev v0.40.0 h1:f8LgP+4WDqOG/RXoUcyLpeIAGOcAbZrZbDQCUee10ng= +gocloud.dev v0.40.0/go.mod h1:drz+VyYNBvrMTW0KZiBAYEdl8lbNZx+OQ7oQvdrFmSQ= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67 h1:1UoZQm6f0P/ZO0w1Ri+f+ifG/gXhegadRdwBIXEFWDo= +golang.org/x/exp v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c= golang.org/x/exp/shiny v0.0.0-20220827204233-334a2380cb91/go.mod h1:VjAR7z0ngyATZTELrBSkxOOHhhlnVUxDye4mcjx5h/8= -golang.org/x/exp/shiny v0.0.0-20230801115018-d63ba01acd4b/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny v0.0.0-20230817173708-d852ddb80c63/go.mod h1:UH99kUObWAZkDnWqppdQe5ZhPYESUw8I0zVV1uWBR+0= golang.org/x/exp/shiny v0.0.0-20240707233637-46b078467d37/go.mod h1:3F+MieQB7dRYLTmnncoFbb1crS5lfQoTfDgQy6K4N0o= -golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa h1:54T+HVkPu4D3lltpEHyI3Fs2pG/GqjGkXLgyKOmifXk= -golang.org/x/exp/typeparams v0.0.0-20240808152545-0cdaa3abc0fa/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= -golang.org/x/image v0.19.0 h1:D9FX4QWkLfkeqaC62SonffIIuYdOk/UE2XKUBgRIBIQ= -golang.org/x/image v0.19.0/go.mod h1:y0zrRqlQRWQ5PXaYCOMLTW2fpsxZ8Qh9I/ohnInJEys= -golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= -golang.org/x/mobile v0.0.0-20240806205939-81131f6468ab/go.mod h1:udWezQGYjqrCxz5nV321pXQTx5oGbZx+khZvFjZNOPM= -golang.org/x/mod v0.20.0 h1:utOm6MM3R3dnawAiJgn0y+xvuYRsm1RKM/4giyfDgV0= -golang.org/x/mod v0.20.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= -golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= -golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= -golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= -golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= -golang.org/x/sys v0.24.0 
h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= -golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/exp/shiny v0.0.0-20241009180824-f66d83c29e7c/go.mod h1:3F+MieQB7dRYLTmnncoFbb1crS5lfQoTfDgQy6K4N0o= +golang.org/x/exp/typeparams v0.0.0-20241217172543-b2144cdd0a67 h1:aOkGQa5iWYZjkoBaUQ8KyQfznXDSSumUfxSlEWSnmIM= +golang.org/x/exp/typeparams v0.0.0-20241217172543-b2144cdd0a67/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk= +golang.org/x/image v0.23.0 h1:HseQ7c2OpPKTPVzNjG5fwJsOTCiiwS4QdsYi5XU6H68= +golang.org/x/image v0.23.0/go.mod h1:wJJBTdLfCCf3tiHa1fNxpZmUI4mmoZvwMCPP0ddoNKY= +golang.org/x/lint v0.0.0-20241112194109-818c5a804067/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20241213221354-a87c1cf6cf46/go.mod h1:Sf9LBimL0mWKEdgAjRmJ6iu7Z34osHQTK/devqFbM2I= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= +golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240521205824-bda55230c457/go.mod h1:pRgIJT+bRLFKnoM1ldnzKoxTIn14Yxz928LQRYYgIN0= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= -golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= -golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= -golang.org/x/tools v0.24.0 h1:J1shsA93PJUEVaUSaay7UXAyE8aimq3GW0pjlolpa24= -golang.org/x/tools v0.24.0/go.mod h1:YhNqVBIfWHdzvTLs0d8LCuMhkKUgSUKldakyV7W/WDQ= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9 h1:LLhsEBxRTBLuKlQxFBYUOU8xyFgXv6cOTp2HASDlsDk= -golang.org/x/xerrors v0.0.0-20240716161551-93cc26a95ae9/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= +golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= +golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= +golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= gomodules.xyz/jsonpatch/v2 
v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= gonum.org/v1/gonum v0.15.1 h1:FNy7N6OUZVUaWG9pTiD+jlhdQ3lMP+/LcTpJ6+a8sQ0= gonum.org/v1/gonum v0.15.1/go.mod h1:eZTZuRFrzu5pcyjN5wJhcIhnUdNijYxX1T2IcrOGY0o= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946 h1:vJpL69PeUullhJyKtTjHjENEmZU3BkO4e+fod7nKzgM= gonum.org/v1/hdf5 v0.0.0-20210714002203-8c5d23bc6946/go.mod h1:BQUWDHIAygjdt1HnUPQ0eWqLN2n5FwJycrpYUVUOx2I= -gonum.org/v1/plot v0.14.0 h1:+LBDVFYwFe4LHhdP8coW6296MBEY4nQ+Y4vuUpJopcE= -gonum.org/v1/plot v0.14.0/go.mod h1:MLdR9424SJed+5VqC6MsouEpig9pZX2VZ57H9ko2bXU= -google.golang.org/api v0.192.0 h1:PljqpNAfZaaSpS+TnANfnNAXKdzHM/B9bKhwRlo7JP0= -google.golang.org/api v0.192.0/go.mod h1:9VcphjvAxPKLmSxVSzPlSRXy/5ARMEw5bf58WoVXafQ= +gonum.org/v1/plot v0.15.0 h1:SIFtFNdZNWLRDRVjD6CYxdawcpJDWySZehJGpv1ukkw= +gonum.org/v1/plot v0.15.0/go.mod h1:3Nx4m77J4T/ayr/b8dQ8uGRmZF6H3eTqliUExDrQHnM= +google.golang.org/api v0.213.0 h1:KmF6KaDyFqB417T68tMPbVmmwtIXs2VB60OJKIHB0xQ= +google.golang.org/api v0.213.0/go.mod h1:V0T5ZhNUUNpYAlL306gFZPFt5F5D/IeyLoktduYYnvQ= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142 h1:oLiyxGgE+rt22duwci1+TG7bg2/L1LQsXwfjPlmuJA0= -google.golang.org/genproto v0.0.0-20240814211410-ddb44dafa142/go.mod h1:G11eXq53iI5Q+kyNOmCvnzBaxEA2Q/Ik5Tj7nqBE8j4= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142 h1:wKguEg1hsxI2/L3hUYrpo1RVi48K+uTyzKqprwLXsb8= -google.golang.org/genproto/googleapis/api v0.0.0-20240814211410-ddb44dafa142/go.mod h1:d6be+8HhtEtucleCbxpPW9PA9XwISACu8nvpPqF0BVo= -google.golang.org/genproto/googleapis/bytestream v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:5/MT647Cn/GGhwTpXC7QqcaR5Cnee4v4MKCU1/nwnIQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142 h1:e7S5W7MGGLaSu8j3YjdezkZ+m1/Nm0uRVRMEMGk26Xs= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240814211410-ddb44dafa142/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU= -google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= -google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= -google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= -google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +google.golang.org/genproto v0.0.0-20241216192217-9240e9c98484 h1:a/U5otbGrI6mYIO598WriFB1172i6Ktr6FGcatZD3Yw= +google.golang.org/genproto v0.0.0-20241216192217-9240e9c98484/go.mod h1:Gmd/M/W9fEyf6VSu/mWLnl+9Be51B9CLdxdsKokYq7Y= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484 h1:ChAdCYNQFDk5fYvFZMywKLIijG7TC2m1C2CMEu11G3o= +google.golang.org/genproto/googleapis/api v0.0.0-20241216192217-9240e9c98484/go.mod h1:KRUmxRI4JmbpAm8gcZM4Jsffi859fo5LQjILwuqj9z8= +google.golang.org/genproto/googleapis/bytestream v0.0.0-20241209162323-e6fa225c2576/go.mod h1:qUsLYwbwz5ostUWtuFuXPlHmSJodC5NI/88ZlHj4M1o= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484 h1:Z7FRVJPSMaHQxD0uXU8WdgFh8PseLM8Q8NzhnpMrBhQ= +google.golang.org/genproto/googleapis/rpc v0.0.0-20241216192217-9240e9c98484/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/grpc v1.69.2 h1:U3S9QEtbXC0bYNvRtcoklF3xGtLViumSYxWykJS+7AU= +google.golang.org/grpc v1.69.2/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod 
h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA= +google.golang.org/grpc/stats/opentelemetry v0.0.0-20240907200651-3ffb98b2c93a/go.mod h1:9i1T9n4ZinTUZGgzENMi8MDDgbGC5mqTS75JAv6xN3A= +google.golang.org/protobuf v1.36.0 h1:mjIs9gYtt56AzC4ZaffQuh88TZurBGhIJMBZGSxNerQ= +google.golang.org/protobuf v1.36.0/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= @@ -757,24 +808,24 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= honnef.co/go/js/dom v0.0.0-20210725211120-f030747120f2/go.mod h1:sUMDUKNB2ZcVjt92UnLy3cdGs+wDAcrPdV3JP6sVgA4= honnef.co/go/tools v0.5.1/go.mod h1:e9irvo83WDG9/irijV44wr3tbhcFeRnfpVlRqVwpzMs= -k8s.io/api v0.30.3 h1:ImHwK9DCsPA9uoU3rVh4QHAHHK5dTSv1nxJUapx8hoQ= -k8s.io/api v0.30.3/go.mod h1:GPc8jlzoe5JG3pb0KJCSLX5oAFIW3/qNJITlDj8BH04= -k8s.io/apiextensions-apiserver v0.30.3 h1:oChu5li2vsZHx2IvnGP3ah8Nj3KyqG3kRSaKmijhB9U= -k8s.io/apiextensions-apiserver v0.30.3/go.mod h1:uhXxYDkMAvl6CJw4lrDN4CPbONkF3+XL9cacCT44kV4= -k8s.io/apimachinery v0.30.3 h1:q1laaWCmrszyQuSQCfNB8cFgCuDAoPszKY4ucAjDwHc= -k8s.io/apimachinery v0.30.3/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/cli-runtime v0.30.3 h1:aG69oRzJuP2Q4o8dm+f5WJIX4ZBEwrvdID0+MXyUY6k= -k8s.io/cli-runtime v0.30.3/go.mod h1:hwrrRdd9P84CXSKzhHxrOivAR9BRnkMt0OeP5mj7X30= -k8s.io/client-go v0.30.3 h1:bHrJu3xQZNXIi8/MoxYtZBBWQQXwy16zqJwloXXfD3k= -k8s.io/client-go v0.30.3/go.mod h1:8d4pf8vYu665/kUbsxWAQ/JDBNWqfFeZnvFiVdmx89U= +k8s.io/api v0.32.0 h1:OL9JpbvAU5ny9ga2fb24X8H6xQlVp+aJMFlgtQjR9CE= +k8s.io/api v0.32.0/go.mod h1:4LEwHZEf6Q/cG96F3dqR965sYOfmPM7rq81BLgsE0p0= +k8s.io/apiextensions-apiserver v0.32.0 h1:S0Xlqt51qzzqjKPxfgX1xh4HBZE+p8KKBq+k2SWNOE0= +k8s.io/apiextensions-apiserver v0.32.0/go.mod h1:86hblMvN5yxMvZrZFX2OhIHAuFIMJIZ19bTvzkP+Fmw= +k8s.io/apimachinery v0.32.0 h1:cFSE7N3rmEEtv4ei5X6DaJPHHX0C+upp+v5lVPiEwpg= +k8s.io/apimachinery v0.32.0/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/cli-runtime v0.32.0 h1:dP+OZqs7zHPpGQMCGAhectbHU2SNCuZtIimRKTv2T1c= +k8s.io/cli-runtime v0.32.0/go.mod h1:Mai8ht2+esoDRK5hr861KRy6z0zHsSTYttNVJXgP3YQ= +k8s.io/client-go v0.32.0 h1:DimtMcnN/JIKZcrSrstiwvvZvLjG0aSxy8PxN8IChp8= +k8s.io/client-go v0.32.0/go.mod h1:boDWvdM1Drk4NJj/VddSLnx59X3OPgwrOo0vGbtq9+8= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34 h1:/amS69DLm09mtbFtN3+LyygSFohnYGMseF8iv+2zulg= -k8s.io/kube-openapi v0.0.0-20240816214639-573285566f34/go.mod h1:G0W3eI9gG219NHRq3h5uQaRBl4pj4ZpwzRP5ti8y770= -k8s.io/metrics v0.30.3 h1:gKCpte5zykrOmQhZ8qmsxyJslMdiLN+sqbBfIWNpbGM= -k8s.io/metrics v0.30.3/go.mod h1:W06L2nXRhOwPkFYDJYWdEIS3u6JcJy3ebIPYbndRs6A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8 
h1:pUdcCO1Lk/tbT5ztQWOBi5HBgbBP1J8+AsQnQCKsi8A= -k8s.io/utils v0.0.0-20240711033017-18e509b52bc8/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7 h1:hcha5B1kVACrLujCKLbr8XWMxCxzQx42DY8QKYJrDLg= +k8s.io/kube-openapi v0.0.0-20241212222426-2c72e554b1e7/go.mod h1:GewRfANuJ70iYzvn+i4lezLDAFzvjxZYK1gn1lWcfas= +k8s.io/metrics v0.32.0 h1:70qJ3ZS/9DrtH0UA0NVBI6gW2ip2GAn9e7NtoKERpns= +k8s.io/metrics v0.32.0/go.mod h1:skdg9pDjVjCPIQqmc5rBzDL4noY64ORhKu9KCPv1+QI= +k8s.io/utils v0.0.0-20241210054802-24370beab758 h1:sdbE21q2nlQtFh65saZY+rRM6x6aJJI8IUa1AmH/qa0= +k8s.io/utils v0.0.0-20241210054802-24370beab758/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= lukechampine.com/uint128 v1.3.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk= @@ -809,15 +860,15 @@ modernc.org/z v1.7.0/go.mod h1:hVdgNMh8ggTuRG1rGU8x+xGRFfiQUIAw0ZqlPy8+HyQ= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/pdf v0.1.1 h1:k1MczvYDUvJBe93bYd7wrZLLUEcLZAuF824/I4e5Xr4= rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= -sigs.k8s.io/controller-runtime v0.18.4 h1:87+guW1zhvuPLh1PHybKdYFLU0YJp4FhJRmiHvm5BZw= -sigs.k8s.io/controller-runtime v0.18.4/go.mod h1:TVoGrfdpbA9VRFaRnKgk9P5/atA0pMwq+f+msb9M8Sg= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3 h1:XX3Ajgzov2RKUdc5jW3t5jwY7Bo7dcRm+tFxT+NfgY0= -sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3/go.mod h1:9n16EZKMhXBNSiUC5kSdFQJkdH3zbxS/JoO619G1VAY= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 h1:W6cLQc5pnqM7vh3b7HvGNfXrJ/xL6BDMS0v1V/HHg5U= -sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3/go.mod h1:JWP1Fj0VWGHyw3YUPjXSQnRnrwezrZSrApfX5S0nIag= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +sigs.k8s.io/controller-runtime v0.19.3 h1:XO2GvC9OPftRst6xWCpTgBZO04S2cbp0Qqkj8bX1sPw= +sigs.k8s.io/controller-runtime v0.19.3/go.mod h1:j4j87DqtsThvwTv5/Tc5NFRyyF/RF0ip4+62tbTSIUM= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8 h1:gBQPwqORJ8d8/YNZWEjoZs7npUVDpVXUUOFfW6CgAqE= +sigs.k8s.io/json v0.0.0-20241014173422-cfa47c3a1cc8/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg= +sigs.k8s.io/kustomize/api v0.18.0 h1:hTzp67k+3NEVInwz5BHyzc9rGxIauoXferXyjv5lWPo= +sigs.k8s.io/kustomize/api v0.18.0/go.mod h1:f8isXnX+8b+SGLHQ6yO4JG1rdkZlvhaCf/uZbLVMb0U= +sigs.k8s.io/kustomize/kyaml v0.18.1 h1:WvBo56Wzw3fjS+7vBjN6TeivvpbW9GmRaWZ9CIVmt4E= +sigs.k8s.io/kustomize/kyaml v0.18.1/go.mod h1:C3L2BFVU1jgcddNBE1TxuVLgS46TjObMwW5FT9FcjYo= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0 h1:nbCitCK2hfnhyiKo6uf2HxUPTCodY6Qaf85SbDIaMBk= +sigs.k8s.io/structured-merge-diff/v4 v4.5.0/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/hack/docker/gen/main.go b/hack/docker/gen/main.go index 3c7bdd40a1..811f7d12ff 100644 --- 
a/hack/docker/gen/main.go
+++ b/hack/docker/gen/main.go
@@ -23,22 +23,128 @@ import (
 	"io/fs"
 	"os"
 	"os/signal"
-	"regexp"
 	"slices"
 	"syscall"
 	"text/template"
 	"time"
 
+	"github.com/vdaas/vald/internal/conv"
 	"github.com/vdaas/vald/internal/errors"
 	"github.com/vdaas/vald/internal/file"
 	"github.com/vdaas/vald/internal/log"
 	"github.com/vdaas/vald/internal/safety"
 	"github.com/vdaas/vald/internal/strings"
+	"github.com/vdaas/vald/internal/sync"
 	"github.com/vdaas/vald/internal/sync/errgroup"
+	"golang.org/x/tools/go/packages"
+	"gopkg.in/yaml.v2"
 )
 
-var tmpl = fmt.Sprintf(`# syntax = docker/dockerfile:latest
-#
+const (
+	agent = "agent"
+	agentFaiss = agent + "-faiss"
+	agentNGT = agent + "-ngt"
+	agentSidecar = agent + "-sidecar"
+	benchJob = "benchmark-job"
+	benchOperator = "benchmark-operator"
+	binfmt = "binfmt"
+	buildbase = "buildbase"
+	buildkit = "buildkit"
+	buildkitSyftScanner = buildkit + "-syft-scanner"
+	ciContainer = "ci-container"
+	devContainer = "dev-container"
+	exampleContainer = "example-client"
+	discovererK8s = "discoverer-k8s"
+	gateway = "gateway"
+	gatewayFilter = gateway + "-filter"
+	gatewayLb = gateway + "-lb"
+	gatewayMirror = gateway + "-mirror"
+	helmOperator = "helm-operator"
+	indexCorrection = "index-correction"
+	indexCreation = "index-creation"
+	indexDeletion = "index-deletion"
+	indexOperator = "index-operator"
+	indexSave = "index-save"
+	loadtest = "loadtest"
+	managerIndex = "manager-index"
+	readreplicaRotate = "readreplica-rotate"
+
+	organization = "vdaas"
+	repository = "vald"
+	defaultBinaryDir = "/usr/bin"
+	usrLocal = "/usr/local"
+	usrLocalBinaryDir = usrLocal + "/bin"
+	usrLocalLibDir = usrLocal + "/lib"
+	defaultBuilderImage = "ghcr.io/" + organization + "/" + repository + "/" + repository + "-" + buildbase
+	defaultBuilderTag = "nightly"
+	defaultLanguage = "en_US.UTF-8"
+	defaultMaintainer = organization + ".org " + repository + " team <" + repository + "@" + organization + ".org>"
+	defaultRuntimeImage = "gcr.io/distroless/static"
+	nonrootUser = "nonroot"
+	rootUser = "root"
+	defaultRuntimeTag = nonrootUser
+	defaultRuntimeUser = nonrootUser + ":" + nonrootUser
+	defaultBuildUser = rootUser + ":" + rootUser
+	defaultBuildStageName = "builder"
+	maintainerKey = "MAINTAINER"
+	minimumArgumentLength = 2
+	ubuntuVersion = "24.04"
+
+	goWorkdir = "${GOPATH}/src/github.com"
+	rustWorkdir = "${HOME}/rust/src/github.com"
+
+	agentInernalPackage = "pkg/agent/internal"
+
+	ngtPreprocess = "make ngt/install"
+	faissPreprocess = "make faiss/install"
+	usearchPreprocess = "make usearch/install"
+
+	helmOperatorRootdir = "/opt/helm"
+	helmOperatorWatchFile = helmOperatorRootdir + "/watches.yaml"
+	helmOperatorChartsDir = helmOperatorRootdir + "/charts"
+
+	apisProtoPath = "apis/proto/**"
+
+	hackPath = "hack/**"
+
+	chartsValdPath = "charts/vald"
+	helmOperatorPath = chartsValdPath + "-helm-operator"
+	chartPath = chartsValdPath + "/Chart.yaml"
+	valuesPath = chartsValdPath + "/values.yaml"
+	templatesPath = chartsValdPath + "/templates/**"
+	helmOperatorChartPath = helmOperatorPath + "/Chart.yaml"
+	helmOperatorValuesPath = helmOperatorPath + "/values.yaml"
+	helmOperatorTemplatesPath = helmOperatorPath + "/templates/**"
+
+	goModPath = "go.mod"
+	goSumPath = "go.sum"
+
+	cargoLockPath = "rust/Cargo.lock"
+	cargoTomlPath = "rust/Cargo.toml"
+	rustBinAgentDirPath = "rust/bin/agent"
+	rustNgtRsPath = "rust/libs/ngt-rs/**"
+	rustNgtPath = "rust/libs/ngt/**"
+	rustProtoPath = "rust/libs/proto/**"
+
+	excludeTestFilesPath = "!**/*_test.go"
+	excludeMockFilesPath = "!**/*_mock.go"
+
+	versionsPath = "versions"
+	operatorSDKVersionPath = versionsPath + "/OPERATOR_SDK_VERSION"
+	goVersionPath = versionsPath + "/GO_VERSION"
+	rustVersionPath = versionsPath + "/RUST_VERSION"
+	faissVersionPath = versionsPath + "/FAISS_VERSION"
+	ngtVersionPath = versionsPath + "/NGT_VERSION"
+	usearchVersionPath = versionsPath + "/USEARCH_VERSION"
+
+	makefilePath = "Makefile"
+	makefileDirPath = makefilePath + ".d/**"
+
+	amd64Platform = "linux/amd64"
+	arm64Platform = "linux/arm64"
+	multiPlatforms = amd64Platform + "," + arm64Platform
+
+	header = `#
 # Copyright (C) 2019-{{.Year}} {{.Maintainer}}
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -52,7 +158,55 @@ var tmpl = fmt.Sprintf(`# syntax = docker/dockerfile:latest
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
-#
+#`
+)
+
+var license = template.Must(template.New("license").Parse(header + `
+
+# DO_NOT_EDIT this workflow file is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go
+
+`))
+
+var docker = template.Must(template.New("Dockerfile").Funcs(template.FuncMap{
+	"RunCommands": func(commands []string) string {
+		if len(commands) == 0 {
+			return ""
+		}
+		var b strings.Builder
+		for i, cmd := range commands {
+			if i > 0 {
+				b.WriteString(" \\\n && ")
+			}
+			b.WriteString(cmd)
+		}
+		return b.String()
+	},
+	"RunMounts": func(commands []string) string {
+		if len(commands) == 0 {
+			return ""
+		}
+		var b strings.Builder
+		for i, cmd := range commands {
+			if i > 0 {
+				b.WriteString(" \\\n ")
+			}
+			b.WriteString(cmd)
+		}
+		return b.String()
+	},
+
+	"Entrypoint": func(entries []string) string {
+		if len(entries) == 0 {
+			return "\"{{.BinDir}}/{{.AppName}}\""
+		}
+		return "\"" + strings.Join(entries, "\", \"") + "\""
+	},
+	"ContainerName": func(c ContainerType) string {
+		return c.String()
+	},
+}).Parse(fmt.Sprintf(`# syntax = docker/dockerfile:latest
+# check=error=true
+%s
 
 # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go
 
@@ -86,26 +240,19 @@ COPY {{$files}}
 {{- end}}
 SHELL ["/bin/bash", "-o", "pipefail", "-c"]
 #skipcq: DOK-W1001, DOK-SC2046, DOK-SC2086, DOK-DL3008
-RUN {{RunMounts .RunMounts}}\
-    echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \
+RUN {{RunMounts .RunMounts}} \
+    set -ex \
+    && echo 'Binary::apt::APT::Keep-Downloaded-Packages "true";' > /etc/apt/apt.conf.d/keep-cache \
     && echo 'APT::Install-Recommends "false";' > /etc/apt/apt.conf.d/no-install-recommends \
     && apt-get clean \
     && apt-get update -y \
     && apt-get upgrade -y \
-{{- if eq (ContainerName .ContainerType) "%s"}}
-    && apt-get install -y --no-install-recommends --fix-missing \
-    curl \
-    gnupg \
-    software-properties-common \
-    && add-apt-repository ppa:ubuntu-toolchain-r/test -y \
-    && apt-get update -y \
-    && apt-get upgrade -y \
-{{- end}}
     && apt-get install -y --no-install-recommends --fix-missing \
     build-essential \
    ca-certificates \
-{{- if not (eq (ContainerName .ContainerType) "%s")}}
    curl \
+{{- if eq (ContainerName .ContainerType) "%s"}}
+    gnupg \
 {{- end}}
    tzdata \
    locales \
@@ -142,111 +289,88 @@ ENTRYPOINT [{{Entrypoint .Entrypoints}}]
 {{- else if and (not (eq (ContainerName .ContainerType) "%s")) (not (eq (ContainerName .ContainerType) "%s"))}}
 ENTRYPOINT ["{{.BinDir}}/{{.AppName}}"]
 {{- end}}
-{{- end}}`, DevContainer.String(), CIContainer.String(),
-	DevContainer.String(),
+{{- end}}`, header, DevContainer.String(), CIContainer.String(), DevContainer.String(),
 	DevContainer.String(), CIContainer.String(),
-	DevContainer.String(), CIContainer.String())
+	DevContainer.String(), CIContainer.String())))
 
-var docker = template.Must(template.New("Dockerfile").Funcs(template.FuncMap{
-	"RunCommands": func(commands []string) string {
-		if len(commands) == 0 {
-			return ""
-		}
-		var b strings.Builder
-		for i, cmd := range commands {
-			if i > 0 {
-				b.WriteString(" \\\n && ")
-			}
-			b.WriteString(cmd)
-		}
-		return b.String()
-	},
-	"RunMounts": func(commands []string) string {
-		if len(commands) == 0 {
-			return ""
-		}
-		var b strings.Builder
-		for i, cmd := range commands {
-			if i > 0 {
-				b.WriteString(" \\\n ")
-			}
-			b.WriteString(cmd)
-		}
-		return b.String()
-	},
+
+type (
+	Workflow struct {
+		Name string `yaml:"name"`
+		On On `yaml:"on"`
+		Jobs Jobs `yaml:"jobs"`
+	}
 
-	"Entrypoint": func(entries []string) string {
-		if len(entries) == 0 {
-			return "\"{{.BinDir}}/{{.AppName}}\""
-		}
-		return "\"" + strings.Join(entries, "\", \"") + "\""
-	},
-	"ContainerName": func(c ContainerType) string {
-		return c.String()
-	},
-}).Parse(tmpl))
-
-type Data struct {
-	AliasImage bool
-	ConfigExists bool
-	Year int
-	ContainerType ContainerType
-	AppName string
-	BinDir string
-	BuildUser string
-	BuilderImage string
-	BuilderTag string
-	BuildStageName string
-	Maintainer string
-	PackageDir string
-	RootDir string
-	RuntimeImage string
-	RuntimeTag string
-	RuntimeUser string
-	Arguments map[string]string
-	Environments map[string]string
-	Entrypoints []string
-	EnvironmentsSlice []string
-	ExtraCopies []string
-	ExtraImages []string
-	ExtraPackages []string
-	Preprocess []string
-	RunCommands []string
-	RunMounts []string
-	StageFiles []string
-}
+	On struct {
+		Schedule Schedule `yaml:"schedule,omitempty"`
+		Push Push `yaml:"push"`
+		PullRequest PullRequest `yaml:"pull_request"`
+		PullRequestTarget PullRequest `yaml:"pull_request_target"`
+	}
+
+	Schedule []struct {
+		Cron string `yaml:"cron,omitempty"`
+	}
 
-type ContainerType int
+	Push struct {
+		Branches []string `yaml:"branches"`
+		Tags []string `yaml:"tags"`
+	}
 
-const (
-	organization = "vdaas"
-	repository = "vald"
-	defaultBinaryDir = "/usr/bin"
-	defaultBuilderImage = "ghcr.io/vdaas/vald/vald-buildbase"
-	defaultBuilderTag = "nightly"
-	defaultLanguage = "en_US.UTF-8"
-	defaultMaintainer = organization + ".org " + repository + " team <" + repository + "@" + organization + ".org>"
-	defaultRuntimeImage = "gcr.io/distroless/static"
-	defaultRuntimeTag = "nonroot"
-	defaultRuntimeUser = "nonroot:nonroot"
-	defaultBuildUser = "root:root"
-	defaultBuildStageName = "builder"
-	maintainerKey = "MAINTAINER"
-	minimumArgumentLength = 2
-	ubuntuVersion = "22.04"
+	PullRequest struct {
+		Paths Paths `yaml:"paths"`
+	}
 
-	goWorkdir = "${GOPATH}/src/github.com"
-	rustWorkdir = "${HOME}/rust/src/github.com"
+	Jobs struct {
+		Build Build `yaml:"build"`
+	}
 
-	agentInernalPackage = "pkg/agent/internal"
+	Build struct {
+		Uses string `yaml:"uses"`
+		With With `yaml:"with"`
+		Secrets string `yaml:"secrets"`
+	}
+
+	With struct {
+		Target string `yaml:"target"`
+		Platforms string `yaml:"platforms,omitempty"`
+	}
 
-	ngtPreprocess = "make ngt/install"
-	faissPreprocess = "make faiss/install"
+	Paths []string
 
-	helmOperatorRootdir = "/opt/helm"
-	helmOperatorWatchFile = helmOperatorRootdir + "/watches.yaml"
-	helmOperatorChartsDir = helmOperatorRootdir + "/charts"
+	Data struct {
+		AliasImage bool
+
ConfigExists bool + Year int + ContainerType ContainerType + AppName string + BinDir string + BuildPlatforms string + BuildStageName string + BuildUser string + BuilderImage string + BuilderTag string + Maintainer string + Name string + PackageDir string + RootDir string + RuntimeImage string + RuntimeTag string + RuntimeUser string + Arguments map[string]string + Environments map[string]string + Entrypoints []string + EnvironmentsSlice []string + ExtraCopies []string + ExtraImages []string + ExtraPackages []string + Preprocess []string + PullRequestPaths []string + RunCommands []string + RunMounts []string + StageFiles []string + } + ContainerType int ) const ( @@ -274,28 +398,28 @@ var ( defaultEnvironments = map[string]string{ "DEBIAN_FRONTEND": "noninteractive", - "HOME": "/root", - "USER": "root", + "HOME": "/" + rootUser, + "USER": rootUser, "INITRD": "No", "LANG": defaultLanguage, "LANGUAGE": defaultLanguage, "LC_ALL": defaultLanguage, "ORG": organization, "TZ": "Etc/UTC", - "PATH": "${PATH}:/usr/local/bin", + "PATH": "${PATH}:" + usrLocalBinaryDir, "REPO": repository, } goDefaultEnvironments = map[string]string{ "GOROOT": "/opt/go", "GOPATH": "/go", "GO111MODULE": "on", - "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:/usr/local/bin", + "PATH": "${PATH}:${GOROOT}/bin:${GOPATH}/bin:" + usrLocalBinaryDir, } rustDefaultEnvironments = map[string]string{ - "RUST_HOME": "/usr/loacl/lib/rust", + "RUST_HOME": usrLocalLibDir + "/rust", "RUSTUP_HOME": "${RUST_HOME}/rustup", "CARGO_HOME": "${RUST_HOME}/cargo", - "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:/usr/local/bin", + "PATH": "${PATH}:${RUSTUP_HOME}/bin:${CARGO_HOME}/bin:" + usrLocalBinaryDir, } clangDefaultEnvironments = map[string]string{ "CC": "gcc", @@ -312,6 +436,10 @@ var ( "make GOARCH=\"${TARGETARCH}\" GOOS=\"${TARGETOS}\" REPO=\"${ORG}\" NAME=\"${REPO}\" cmd/${PKG}/${APP_NAME}", "mv \"cmd/${PKG}/${APP_NAME}\" \"{{$.BinDir}}/${APP_NAME}\"", } + goExampleBuildCommands = []string{ + "make GOARCH=\"${TARGETARCH}\" GOOS=\"${TARGETOS}\" REPO=\"${ORG}\" NAME=\"${REPO}\" ${PKG}/${APP_NAME}", + "mv \"${PKG}/${APP_NAME}\" \"{{$.BinDir}}/${APP_NAME}\"", + } rustBuildCommands = []string{ "make rust/target/release/${APP_NAME}", "mv \"rust/target/release/${APP_NAME}\" \"{{$.BinDir}}/${APP_NAME}\"", @@ -321,21 +449,21 @@ var ( defaultMounts = []string{ "--mount=type=bind,target=.,rw", "--mount=type=tmpfs,target=/tmp", - "--mount=type=cache,target=/var/lib/apt,sharing=locked", - "--mount=type=cache,target=/var/cache/apt,sharing=locked", + "--mount=type=cache,target=/var/lib/apt,sharing=locked,id=${APP_NAME}", + "--mount=type=cache,target=/var/cache/apt,sharing=locked,id=${APP_NAME}", } - goDefaultMounts = []string{ "--mount=type=cache,target=\"${GOPATH}/pkg\",id=\"go-build-${TARGETARCH}\"", "--mount=type=cache,target=\"${HOME}/.cache/go-build\",id=\"go-build-${TARGETARCH}\"", + "--mount=type=tmpfs,target=\"${GOPATH}/src\"", } clangBuildDeps = []string{ "cmake", - "gcc", "g++", - "unzip", + "gcc", "libssl-dev", + "unzip", } ngtBuildDeps = []string{ "liblapack-dev", @@ -345,13 +473,17 @@ var ( faissBuildDeps = []string{ "gfortran", } + rustBuildDeps = []string{ + "pkg-config", + } devContainerDeps = []string{ + "file", "gawk", "gnupg2", "graphviz", "jq", - "libhdf5-dev", "libaec-dev", + "libhdf5-dev", "sed", "zip", } @@ -370,13 +502,12 @@ var ( "make kind/install", "make kubectl/install", "make kubelinter/install", - "make reviewdog/install", - "make tparse/install", - "make valdcli/install", - "make yq/install", "make minikube/install", 
+ "make reviewdog/install", "make stern/install", "make telepresence/install", + "make tparse/install", + "make yq/install", } devContainerPreprocess = []string{ @@ -406,7 +537,7 @@ func appendM[K comparable](maps ...map[K]string) map[K]string { for _, m := range maps[1:] { for k, v := range m { ev, ok := result[k] - if ok { + if ok && !strings.Contains(v, ev) { v += ":" + ev } result[k] = v @@ -428,33 +559,51 @@ func appendM[K comparable](maps ...map[K]string) map[K]string { return result } -var re = regexp.MustCompile(`\$\{?(\w+)\}?`) - +// extractVariables efficiently extracts variables from strings func extractVariables(value string) []string { - matches := re.FindAllStringSubmatch(value, -1) - vars := make([]string, 0, len(matches)) - for _, match := range matches { - vars = append(vars, match[1]) + var vars []string + start := -1 + for i := 0; i < len(value); i++ { + if value[i] == '$' && i+1 < len(value) && value[i+1] == '{' { + start = i + 2 + } else if start != -1 && value[i] == '}' { + vars = append(vars, value[start:i]) + start = -1 + } else if value[i] == '$' && start == -1 { + start = i + 1 + for start < len(value) && (('a' <= value[start] && value[start] <= 'z') || ('A' <= value[start] && value[start] <= 'Z') || ('0' <= value[start] && value[start] <= '9') || value[start] == '_') { + start++ + } + vars = append(vars, value[i+1:start]) + i = start - 1 + start = -1 + } } return vars } +// topologicalSort sorts the elements topologically and ensures that equal-level nodes are sorted by name func topologicalSort(envMap map[string]string) []string { - // Graph structures - inDegree := make(map[string]int) - graph := make(map[string][]string) + inDegree := make(map[string]int) // Tracks the in-degree of each node + graph := make(map[string][]string) // Tracks the edges between nodes + result := make([]string, 0, len(envMap)) // Result slice pre-allocated for efficiency - // Initialize the graph + gl := 0 + // Initialize the graph structure and in-degrees for key, value := range envMap { vars := extractVariables(value) for _, refKey := range vars { - if refKey != key { + if refKey != key { // Prevent self-dependency graph[refKey] = append(graph[refKey], key) + if len(graph[refKey]) > gl { + gl = len(graph[refKey]) + } inDegree[key]++ } } } + // Initialize the queue with nodes having in-degree 0 (no dependencies) queue := make([]string, 0, len(envMap)-len(graph)) for key := range envMap { if inDegree[key] == 0 { @@ -462,22 +611,35 @@ func topologicalSort(envMap map[string]string) []string { } } + // Sort the initial queue to maintain lexicographical order for nodes with no dependencies slices.Sort(queue) - // Topological sort - result := make([]string, 0, len(envMap)) + // Preallocate a reusable slice for collecting new nodes + newNodes := make([]string, 0, gl) + // Topological sort process for len(queue) > 0 { node := queue[0] queue = queue[1:] + + // Append the result as `node=value` if value, exists := envMap[node]; exists { result = append(result, node+"="+value) } + + // Process all neighbors and decrement their in-degrees for _, neighbor := range graph[node] { inDegree[neighbor]-- if inDegree[neighbor] == 0 { - queue = append(queue, neighbor) + newNodes = append(newNodes, neighbor) } } + + // If new nodes were found, sort them and append to the queue + if len(newNodes) > 0 { + slices.Sort(newNodes) // Sort new nodes only once + queue = append(queue, newNodes...) 
+ newNodes = newNodes[:0] // Reuse the slice by resetting it + } } return result @@ -497,7 +659,6 @@ func main() { syscall.SIGKILL, syscall.SIGTERM) defer cancel() - log.Debug(tmpl) maintainer := os.Getenv(maintainerKey) if maintainer == "" { @@ -506,34 +667,35 @@ func main() { year := time.Now().Year() eg, egctx := errgroup.New(ctx) for n, d := range map[string]Data{ - "vald-agent-ngt": { + "vald-" + agentNGT: { AppName: "ngt", - PackageDir: "agent/core/ngt", + PackageDir: agent + "/core/ngt", ExtraPackages: append(clangBuildDeps, ngtBuildDeps...), Preprocess: []string{ngtPreprocess}, }, - "vald-agent-faiss": { + "vald-" + agentFaiss: { AppName: "faiss", - PackageDir: "agent/core/faiss", + PackageDir: agent + "/core/faiss", ExtraPackages: append(clangBuildDeps, append(ngtBuildDeps, faissBuildDeps...)...), Preprocess: []string{faissPreprocess}, }, - "vald-agent": { - AppName: "agent", - PackageDir: "agent/core/agent", + "vald-" + agent: { + AppName: agent, + PackageDir: agent + "/core/" + agent, ContainerType: Rust, RuntimeImage: "gcr.io/distroless/cc-debian12", ExtraPackages: append(clangBuildDeps, append(ngtBuildDeps, - faissBuildDeps...)...), + append(faissBuildDeps, + rustBuildDeps...)...)...), Preprocess: []string{ ngtPreprocess, faissPreprocess, }, }, - "vald-agent-sidecar": { + "vald-" + agentSidecar: { AppName: "sidecar", PackageDir: "agent/sidecar", }, @@ -569,6 +731,10 @@ func main() { AppName: "index-save", PackageDir: "index/job/save", }, + "vald-index-deletion": { + AppName: "index-deletion", + PackageDir: "index/job/deletion", + }, "vald-readreplica-rotate": { AppName: "readreplica-rotate", PackageDir: "index/job/readreplica/rotate", @@ -597,7 +763,7 @@ func main() { "OPERATOR_SDK_VERSION": "latest", }, ExtraCopies: []string{ - "--from=operator /usr/local/bin/${APP_NAME} {{$.BinDir}}/${APP_NAME}", + "--from=operator " + usrLocalBinaryDir + "/${APP_NAME} {{$.BinDir}}/${APP_NAME}", }, ExtraImages: []string{ "quay.io/operator-framework/helm-operator:${OPERATOR_SDK_VERSION} AS operator", @@ -628,7 +794,7 @@ func main() { }, Entrypoints: []string{"{{$.BinDir}}/{{.AppName}}", "run", "--watches-file=" + helmOperatorWatchFile}, }, - "vald-cli-loadtest": { + "vald-loadtest": { AppName: "loadtest", PackageDir: "tools/cli/loadtest", ExtraPackages: append(clangBuildDeps, "libhdf5-dev", "libaec-dev"), @@ -644,8 +810,9 @@ func main() { ExtraPackages: append([]string{"npm"}, append(clangBuildDeps, append(ngtBuildDeps, append(faissBuildDeps, - devContainerDeps...)...)...)...), - Preprocess: append(ciContainerPreprocess, ngtPreprocess, faissPreprocess), + append(rustBuildDeps, + devContainerDeps...)...)...)...)...), + Preprocess: append(ciContainerPreprocess, ngtPreprocess, faissPreprocess, usearchPreprocess), Entrypoints: []string{"/bin/bash"}, }, "vald-dev-container": { @@ -659,12 +826,21 @@ func main() { ExtraPackages: append(clangBuildDeps, append(ngtBuildDeps, append(faissBuildDeps, - devContainerDeps...)...)...), + append(rustBuildDeps, + devContainerDeps...)...)...)...), Preprocess: append(devContainerPreprocess, append(ciContainerPreprocess, ngtPreprocess, faissPreprocess)...), }, + "vald-example-client": { + AppName: "client", + PackageDir: "example/client", + ExtraPackages: append(clangBuildDeps, "libhdf5-dev", "libaec-dev"), + Preprocess: []string{ + "make hdf5/install", + }, + }, "vald-buildbase": { AppName: "buildbase", AliasImage: true, @@ -698,6 +874,186 @@ func main() { name := n data := d + eg.Go(safety.RecoverFunc(func() error { + data.Name = strings.TrimPrefix(name, 
"vald-") + switch data.ContainerType { + case HelmOperator: + data.PullRequestPaths = append(data.PullRequestPaths, + chartPath, + valuesPath, + templatesPath, + helmOperatorChartPath, + helmOperatorValuesPath, + helmOperatorTemplatesPath, + operatorSDKVersionPath, + ) + case DevContainer, CIContainer: + data.PullRequestPaths = append(data.PullRequestPaths, + apisProtoPath, + hackPath, + ) + case Go: + data.PullRequestPaths = append(data.PullRequestPaths, + apisProtoPath, + goModPath, + goSumPath, + goVersionPath, + excludeTestFilesPath, + excludeMockFilesPath, + ) + mainFile := file.Join(os.Args[1], "cmd", data.PackageDir, "main.go") + if file.Exists(mainFile) { + ns, err := buildDependencyTree(os.Args[1], mainFile) + if err != nil { + log.Error(err) + } + pkgs := make([]string, 0, len(ns)+1) + pkgs = append(pkgs, file.Join("cmd", data.PackageDir)) + for _, pnode := range ns { + pkgs = append(pkgs, pnode.ToSlice()...) + } + slices.Sort(pkgs) + pkgs = slices.Compact(pkgs) + root, err := os.Getwd() + if err != nil { + root = os.Getenv("HOME") + } + if root != "" && !strings.HasSuffix(root, string(os.PathSeparator)) { + root += string(os.PathSeparator) + } + for i, pkg := range pkgs { + const splitWord = "/vdaas/vald/" + pkg = file.Join(pkg, "*.go") + index := strings.LastIndex(pkg, splitWord) + if index != -1 { + pkg = pkg[index+len(splitWord):] + } + if root != "" { + pkg = strings.TrimPrefix(pkg, root) + } + pkgs[i] = pkg + } + data.PullRequestPaths = append(data.PullRequestPaths, pkgs...) + } + case Rust: + data.PullRequestPaths = append(data.PullRequestPaths, + apisProtoPath, + cargoLockPath, + cargoTomlPath, + rustBinAgentDirPath, + rustNgtRsPath, + rustNgtPath, + rustProtoPath, + rustVersionPath, + ) + } + if strings.EqualFold(data.Name, agentFaiss) || data.ContainerType == Rust { + data.PullRequestPaths = append(data.PullRequestPaths, faissVersionPath) + } + if strings.EqualFold(data.Name, agentNGT) || data.ContainerType == Rust { + data.PullRequestPaths = append(data.PullRequestPaths, ngtVersionPath) + } + + if !data.AliasImage { + data.PullRequestPaths = append(data.PullRequestPaths, makefilePath, makefileDirPath) + } + + if data.AliasImage { + data.BuildPlatforms = multiPlatforms + } + if data.ContainerType == CIContainer || data.Name == loadtest { + data.BuildPlatforms = amd64Platform + } + + data.Year = time.Now().Year() + if maintainer := os.Getenv(maintainerKey); maintainer != "" { + data.Maintainer = maintainer + } else { + data.Maintainer = defaultMaintainer + } + + log.Infof("Generating %s's workflow", data.Name) + workflow := new(Workflow) + err := yaml.Unmarshal(conv.Atob(`name: "Build docker image: `+data.Name+`" +on: + schedule: + - cron: "0 * * * *" + push: + branches: + - "main" + - "release/v*.*" + - "!release/v*.*.*" + tags: + - "*.*.*" + - "*.*.*-*" + - "v*.*.*" + - "v*.*.*-*" + pull_request: + paths: + - ".github/actions/docker-build/action.yaml" + - ".github/workflows/_docker-image.yaml" + - ".github/workflows/dockers-`+data.Name+`-image.yaml" + - "dockers/`+data.PackageDir+`/Dockerfile" + - "hack/docker/gen/main.go" + pull_request_target: + paths: [] + +jobs: + build: + uses: "./.github/workflows/_docker-image.yaml" + with: + target: "`+data.Name+`" + platforms: "" + secrets: "inherit" +`), &workflow) + if err != nil { + return fmt.Errorf("Error decoding YAML: %v", err) + } + + if !data.AliasImage { + workflow.On.Schedule = nil + } + workflow.On.PullRequest.Paths = append(workflow.On.PullRequest.Paths, data.PullRequestPaths...) 
+ if strings.EqualFold(data.Name, exampleContainer) { + workflow.On.PullRequest.Paths = slices.DeleteFunc(workflow.On.PullRequest.Paths, func(path string) bool { + return strings.HasPrefix(path, "cmd") || strings.HasPrefix(path, "pkg") + }) + workflow.On.PullRequest.Paths = append(workflow.On.PullRequest.Paths, data.PackageDir+"/**") + } + slices.Sort(workflow.On.PullRequest.Paths) + workflow.On.PullRequest.Paths = slices.Compact(workflow.On.PullRequest.Paths) + + workflow.On.PullRequestTarget.Paths = workflow.On.PullRequest.Paths + workflow.Jobs.Build.With.Platforms = data.BuildPlatforms + + workflowYamlTmp, err := yaml.Marshal(workflow) + if err != nil { + return fmt.Errorf("error marshaling workflowStruct to YAML: %w", err) + } + + // remove the double quotation marks from the generated key "on": (note that the word "on" is a reserved word in sigs.k8s.io/yaml) + workflowYaml := strings.Replace(string(workflowYamlTmp), "\"on\":", "on:", 1) + + if len(header) > (int(^uint(0)>>1) - len(workflowYaml)) { + return fmt.Errorf("size computation for allocation may overflow") + } + totalLen := len(header) + len(workflowYaml) + + buf := bytes.NewBuffer(make([]byte, 0, totalLen)) + err = license.Execute(buf, data) + if err != nil { + return fmt.Errorf("error executing template: %w", err) + } + buf.WriteString("\r\n") + buf.WriteString(workflowYaml) + fileName := file.Join(os.Args[1], ".github/workflows", "dockers-"+data.Name+"-image.yaml") + _, err = file.OverWriteFile(egctx, fileName, buf, fs.ModePerm) + if err != nil { + return fmt.Errorf("error writing workflow file for %s error: %w", fileName, err) + } + return nil + })) + eg.Go(safety.RecoverFunc(func() error { data.Maintainer = maintainer data.Year = year @@ -742,6 +1098,8 @@ func main() { } if file.Exists(file.Join(os.Args[1], "cmd", data.PackageDir)) { commands = append(commands, goBuildCommands...) + } else if strings.HasPrefix(data.PackageDir, "example") && file.Exists(file.Join(os.Args[1], data.PackageDir)) { + commands = append(commands, goExampleBuildCommands...) 
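// For illustration: goBuildCommands (defined earlier) builds a regular
// component from cmd/${PKG}/${APP_NAME}, whereas goExampleBuildCommands
// builds straight from ${PKG}/${APP_NAME}; the branch above is what lets the
// vald-example-client image (PackageDir "example/client") be generated from
// the same Dockerfile template even though it has no cmd/ entry.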
} data.RunCommands = commands mounts := make([]string, 0, len(defaultMounts)+len(goDefaultMounts)) @@ -790,14 +1148,11 @@ func main() { data.RootDir = "${HOME}" data.Environments["ROOTDIR"] = os.Args[1] } - if strings.Contains(data.BuildUser, "root") { - data.Environments["HOME"] = "/root" - data.Environments["USER"] = "root" + if strings.Contains(data.BuildUser, rootUser) { + data.Environments["HOME"] = "/" + rootUser + data.Environments["USER"] = rootUser } else { - user := data.BuildUser - if strings.Contains(user, ":") { - user = strings.SplitN(user, ":", 2)[0] - } + user, _, _ := strings.Cut(data.BuildUser, ":") data.Environments["HOME"] = "/home/" + user data.Environments["USER"] = user } @@ -807,15 +1162,116 @@ func main() { data.EnvironmentsSlice = topologicalSort(data.Environments) data.ConfigExists = file.Exists(file.Join(os.Args[1], "cmd", data.PackageDir, "sample.yaml")) - buf := bytes.NewBuffer(make([]byte, 0, len(tmpl))) + buf := bytes.NewBuffer(make([]byte, 0, 1024)) log.Infof("Generating %s's Dockerfile", name) docker.Execute(buf, data) tpl := buf.String() buf.Reset() template.Must(template.New("Dockerfile").Parse(tpl)).Execute(buf, data) - file.OverWriteFile(egctx, file.Join(os.Args[1], "dockers", data.PackageDir, "Dockerfile"), buf, fs.ModePerm) + fileName := file.Join(os.Args[1], "dockers", data.PackageDir, "Dockerfile") + _, err := file.OverWriteFile(egctx, fileName, buf, fs.ModePerm) + if err != nil { + return fmt.Errorf("error writing Dockerfile for %s error: %w", fileName, err) + } return nil })) } eg.Wait() } + +// PackageNode represents a node in the dependency tree. +type PackageNode struct { + Name string + Imports []*PackageNode +} + +// ToSlice traverses the dependency tree and returns all dependencies as a slice. +func (n PackageNode) ToSlice() (pkgs []string) { + pkgs = make([]string, 0, len(n.Imports)+1) + if n.Name != "command-line-arguments" { + pkgs = append(pkgs, n.Name) + } + for _, node := range n.Imports { + pkgs = append(pkgs, node.ToSlice()...) + } + return pkgs +} + +// String returns string of the dependency tree in a readable format. +func (n PackageNode) String() string { + return n.string(0) +} + +func (n PackageNode) string(depth int) (tree string) { + tree = fmt.Sprintf("%s- %s\n", strings.Repeat(" ", depth), n.Name) + for _, node := range n.Imports { + tree += node.string(depth + 1) + } + return tree +} + +// processDependencies processes package dependencies while avoiding duplicate processing. +func processDependencies( + pkg *packages.Package, + nodes map[string]*PackageNode, + mu *sync.Mutex, + checkList map[string]*PackageNode, + wg *sync.WaitGroup, +) *PackageNode { + if !strings.Contains(pkg.PkgPath, "vdaas/vald") && pkg.Name != "main" { + return nil + } + if node, exists := checkList[pkg.PkgPath]; exists { + return node + } + + node := &PackageNode{Name: pkg.PkgPath} + nodes[pkg.PkgPath] = node + checkList[pkg.PkgPath] = node + for _, imp := range pkg.Imports { + if !strings.Contains(imp.PkgPath, "vdaas/vald") { + continue + } + if child, exists := checkList[imp.PkgPath]; exists { + node.Imports = append(node.Imports, child) + continue + } + child := processDependencies(imp, nodes, mu, checkList, wg) + if child != nil { + node.Imports = append(node.Imports, child) + } + } + + return node +} + +// buildDependencyTree constructs a dependency tree for multiple entry packages. 
+func buildDependencyTree(rootDir, entryFile string) ([]*PackageNode, error) { + cfg := &packages.Config{ + Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps, + Dir: rootDir, + } + + // Use entry file (e.g., main.go) as the root for analysis. + pkgs, err := packages.Load(cfg, entryFile) + if err != nil { + return nil, err + } + + nodes := make(map[string]*PackageNode) + checkList := make(map[string]*PackageNode, len(pkgs)) // Tracks processed packages + var mu sync.Mutex + var wg sync.WaitGroup + + // Process all entry packages + var roots []*PackageNode + for _, pkg := range pkgs { + root := processDependencies(pkg, nodes, &mu, checkList, &wg) + if root != nil { + roots = append(roots, root) + } + } + wg.Wait() + + return roots, nil +} diff --git a/hack/git/hooks/pre-commit b/hack/git/hooks/pre-commit index 7e65545c59..6aa410ae3b 100644 --- a/hack/git/hooks/pre-commit +++ b/hack/git/hooks/pre-commit @@ -37,4 +37,5 @@ if git diff HEAD^ --name-only | grep ".go$" > /dev/null; then fi mv -f ${TMPDIR}/go.mod . mv -f ${TMPDIR}/go.sum . + git diff --cached --name-only | xargs -r git add --renormalize fi diff --git a/hack/go.mod.default b/hack/go.mod.default index 5582e58f87..9c32b6fc38 100644 --- a/hack/go.mod.default +++ b/hack/go.mod.default @@ -1,6 +1,6 @@ module github.com/vdaas/vald -go 1.23.0 +go 1.23.4 replace ( cloud.google.com/go => cloud.google.com/go upgrade @@ -13,7 +13,7 @@ replace ( cloud.google.com/go/monitoring => cloud.google.com/go/monitoring upgrade cloud.google.com/go/pubsub => cloud.google.com/go/pubsub upgrade cloud.google.com/go/secretmanager => cloud.google.com/go/secretmanager upgrade - cloud.google.com/go/storage => cloud.google.com/go/storage upgrade + cloud.google.com/go/storage => cloud.google.com/go/storage v1.47.0 // https://github.com/googleapis/google-cloud-go/issues/11283 cloud.google.com/go/trace => cloud.google.com/go/trace upgrade code.cloudfoundry.org/bytefmt => code.cloudfoundry.org/bytefmt upgrade contrib.go.opencensus.io/exporter/aws => contrib.go.opencensus.io/exporter/aws upgrade @@ -131,7 +131,7 @@ replace ( github.com/gobwas/httphead => github.com/gobwas/httphead upgrade github.com/gobwas/pool => github.com/gobwas/pool upgrade github.com/gobwas/ws => github.com/gobwas/ws upgrade - github.com/goccy/go-json => github.com/goccy/go-json upgrade + github.com/goccy/go-json => github.com/goccy/go-json v0.10.3 // https://github.com/goccy/go-json/issues/529 github.com/gocql/gocql => github.com/gocql/gocql upgrade github.com/gocraft/dbr/v2 => github.com/gocraft/dbr/v2 upgrade github.com/godbus/dbus/v5 => github.com/godbus/dbus/v5 upgrade @@ -329,21 +329,21 @@ replace ( gopkg.in/yaml.v2 => gopkg.in/yaml.v2 upgrade gopkg.in/yaml.v3 => gopkg.in/yaml.v3 upgrade honnef.co/go/tools => honnef.co/go/tools upgrade - k8s.io/api => k8s.io/api v0.30.3 - k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.30.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.30.3 - k8s.io/cli-runtime => k8s.io/cli-runtime v0.30.3 - k8s.io/client-go => k8s.io/client-go v0.30.3 - k8s.io/component-base => k8s.io/component-base v0.30.3 + k8s.io/api => k8s.io/api v0.32.0 + k8s.io/apiextensions-apiserver => k8s.io/apiextensions-apiserver v0.32.0 + k8s.io/apimachinery => k8s.io/apimachinery v0.32.0 + k8s.io/cli-runtime => k8s.io/cli-runtime v0.32.0 + k8s.io/client-go => k8s.io/client-go v0.32.0 + k8s.io/component-base => k8s.io/component-base v0.32.0 k8s.io/klog/v2 => k8s.io/klog/v2 upgrade k8s.io/kube-openapi => k8s.io/kube-openapi master - k8s.io/kubernetes => 
k8s.io/kubernetes v0.30.3 - k8s.io/metrics => k8s.io/metrics v0.30.3 + k8s.io/kubernetes => k8s.io/kubernetes v0.32.0 + k8s.io/metrics => k8s.io/metrics v0.32.0 nhooyr.io/websocket => nhooyr.io/websocket upgrade rsc.io/pdf => rsc.io/pdf upgrade - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.18.4 + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.19.3 sigs.k8s.io/json => sigs.k8s.io/json upgrade sigs.k8s.io/kustomize => sigs.k8s.io/kustomize upgrade - sigs.k8s.io/structured-merge-diff/v4 => sigs.k8s.io/structured-merge-diff/v4 upgrade + sigs.k8s.io/structured-merge-diff/v4 => sigs.k8s.io/structured-merge-diff/v4 v4.5.0 sigs.k8s.io/yaml => sigs.k8s.io/yaml upgrade ) diff --git a/hack/helm/schema/crd/main.go b/hack/helm/schema/crd/main.go index eaba39a8e9..05de24d1ba 100644 --- a/hack/helm/schema/crd/main.go +++ b/hack/helm/schema/crd/main.go @@ -28,7 +28,7 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/strings" - yaml "gopkg.in/yaml.v2" + yaml "sigs.k8s.io/yaml" ) const ( diff --git a/hack/license/gen/main.go b/hack/license/gen/main.go index 251956c086..7e53aac082 100644 --- a/hack/license/gen/main.go +++ b/hack/license/gen/main.go @@ -51,6 +51,7 @@ var ( {{.Escape}} `)) docker = template.Must(template.New("Apache License").Parse(`{{.Escape}} syntax = docker/dockerfile:latest +{{.Escape}} check=error=true {{.Escape}} {{.Escape}} Copyright (C) 2019-{{.Year}} {{.Maintainer}} {{.Escape}} @@ -132,6 +133,7 @@ const ( minimumArgumentLength = 2 defaultMaintainer = "vdaas.org vald team " maintainerKey = "MAINTAINER" + yearKey = "YEAR" ) func main() { @@ -160,7 +162,8 @@ func dirwalk(dir string) []string { if f.IsDir() { if !strings.Contains(f.Name(), "vendor") && !strings.Contains(f.Name(), "versions") && - !strings.Contains(f.Name(), ".git") || + !strings.Contains(f.Name(), ".git") && + !strings.Contains(f.Name(), "target") || strings.HasPrefix(f.Name(), ".github") { paths = append(paths, dirwalk(file.Join(dir, f.Name()))...) 
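// For illustration: since && binds tighter than ||, the directory is walked
// either when its name contains none of "vendor", "versions", ".git", or
// "target" (the newly added check, presumably to skip Rust's target/ build
// output), or when it starts with ".github"; without that extra clause
// ".github" would be skipped, because it contains the substring ".git".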
} @@ -264,9 +267,20 @@ func readAndRewrite(path string) error { if maintainer == "" { maintainer = defaultMaintainer } + var year int + if yearString := os.Getenv(yearKey); yearString == "" { + year = time.Now().Year() + } else { + y, err := time.Parse("2006", yearString) + if err != nil { + // skipcq: RVV-A0003 + log.Fatal(err) + } + year = y.Year() + } d := Data{ Maintainer: maintainer, - Year: time.Now().Year(), + Year: year, Escape: sharpEscape, } if fi.Name() == "LICENSE" { diff --git a/internal/backoff/backoff.go b/internal/backoff/backoff.go index c50d33511f..6535cff285 100644 --- a/internal/backoff/backoff.go +++ b/internal/backoff/backoff.go @@ -186,10 +186,40 @@ func (b *backoff) Do( dur *= b.backoffFactor jdur = b.addJitter(dur) } + if cnt >= b.maxRetryCount-1 { + select { + case <-dctx.Done(): + switch dctx.Err() { + case context.DeadlineExceeded: + log.Debugf("[backoff]\tfor: "+name+",\tDeadline Exceeded\terror: %v", err.Error()) + return nil, errors.ErrBackoffTimeout(err) + case context.Canceled: + log.Debugf("[backoff]\tfor: "+name+",\tCanceled\terror: %v", err.Error()) + return nil, err + default: + return nil, errors.Join(dctx.Err(), err) + } + default: + } + } } } } - return res, err + select { + case <-dctx.Done(): + switch dctx.Err() { + case context.DeadlineExceeded: + log.Debugf("[backoff]\tfor: "+name+",\tDeadline Exceeded\terror: %v", err.Error()) + return nil, errors.ErrBackoffTimeout(err) + case context.Canceled: + log.Debugf("[backoff]\tfor: "+name+",\tCanceled\terror: %v", err.Error()) + return nil, err + default: + return nil, errors.Join(dctx.Err(), err) + } + default: + return res, err + } } func (b *backoff) addJitter(dur float64) float64 { diff --git a/internal/backoff/backoff_test.go b/internal/backoff/backoff_test.go index a125c3bf7f..81cf535318 100644 --- a/internal/backoff/backoff_test.go +++ b/internal/backoff/backoff_test.go @@ -288,7 +288,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return nil, false, err } @@ -317,7 +317,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -346,7 +346,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -380,7 +380,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -413,7 +413,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -442,7 +442,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { return str, true, err } @@ -470,7 +470,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := 
context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") f := func(context.Context) (any, bool, error) { cancel() return str, true, err @@ -499,7 +499,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx, cancel := context.WithCancel(context.Background()) - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ @@ -532,7 +532,7 @@ func Test_backoff_Do(t *testing.T) { }(), func() test { ctx := context.Background() - err := errors.New("erros is occurred") + err := errors.New("errors is occurred") cnt := 0 f := func(context.Context) (any, bool, error) { cnt++ diff --git a/internal/cache/cache.go b/internal/cache/cache.go index 9aba431ff5..ab280aa4c9 100644 --- a/internal/cache/cache.go +++ b/internal/cache/cache.go @@ -30,7 +30,7 @@ type cache[V any] struct { cacher cacher.Type expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, V) } // New returns the Cache instance or error. diff --git a/internal/cache/gache/gache.go b/internal/cache/gache/gache.go index f0f5f9210d..3523400738 100644 --- a/internal/cache/gache/gache.go +++ b/internal/cache/gache/gache.go @@ -29,7 +29,7 @@ type cache[V any] struct { gache gache.Gache[V] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, V) } // New loads a cache model and returns a new cache struct. diff --git a/internal/cache/gache/gache_test.go b/internal/cache/gache/gache_test.go index a8e6d69d7e..55980176f9 100644 --- a/internal/cache/gache/gache_test.go +++ b/internal/cache/gache/gache_test.go @@ -76,7 +76,7 @@ func TestNew(t *testing.T) { } }(), func() test { - expiredHook := func(context.Context, string) {} + expiredHook := func(context.Context, string, any) {} c := new(cache[any]) for _, opt := range append(defaultOptions[any](), WithExpiredHook[any](expiredHook)) { opt(c) @@ -129,7 +129,7 @@ func Test_cache_Start(t *testing.T) { gache gache.Gache[any] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, any) } type want struct{} type test struct { @@ -199,7 +199,7 @@ func Test_cache_Get(t *testing.T) { gache gache.Gache[any] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, any) } type want struct { want any @@ -299,7 +299,7 @@ func Test_cache_Set(t *testing.T) { gache gache.Gache[any] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, any) } type want struct { key string @@ -383,7 +383,7 @@ func Test_cache_Delete(t *testing.T) { gache gache.Gache[any] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, any) } type want struct { key string @@ -487,7 +487,7 @@ func Test_cache_GetAndDelete(t *testing.T) { gache gache.Gache[any] expireDur time.Duration expireCheckDur time.Duration - expiredHook func(context.Context, string) + expiredHook func(context.Context, string, any) } type want struct { want any diff --git a/internal/cache/gache/option.go b/internal/cache/gache/option.go index 3b1f51439a..4c3eea14ae 100644 --- a/internal/cache/gache/option.go +++ 
b/internal/cache/gache/option.go @@ -42,7 +42,7 @@ func WithGache[V any](g gache.Gache[V]) Option[V] { } // WithExpiredHook returns Option after set expiredHook when f is not nil. -func WithExpiredHook[V any](f func(context.Context, string)) Option[V] { +func WithExpiredHook[V any](f func(context.Context, string, V)) Option[V] { return func(c *cache[V]) { if f != nil { c.expiredHook = f diff --git a/internal/cache/gache/option_test.go b/internal/cache/gache/option_test.go index 2774f2d29f..75389f79a2 100644 --- a/internal/cache/gache/option_test.go +++ b/internal/cache/gache/option_test.go @@ -60,7 +60,7 @@ func TestDefaultOptions(t *testing.T) { tests := []test{ { - name: "set succuess", + name: "set success", want: want{ want: &cache[any]{ gache: gache.New[any](), @@ -122,7 +122,7 @@ func TestWithGache(t *testing.T) { func() test { ga := gache.New[any]() return test{ - name: "set succuess when g is not nil", + name: "set success when g is not nil", args: args{ g: ga, }, @@ -135,7 +135,7 @@ func TestWithGache(t *testing.T) { }(), func() test { return test{ - name: "set succuess when g is nil", + name: "set success when g is nil", want: want{ want: new(T), }, @@ -170,7 +170,7 @@ func TestWithGache(t *testing.T) { func TestWithExpiredHook(t *testing.T) { type T = cache[any] type args struct { - f func(context.Context, string) + f func(context.Context, string, any) } type want struct { want *T @@ -193,9 +193,9 @@ func TestWithExpiredHook(t *testing.T) { tests := []test{ func() test { - fn := func(context.Context, string) {} + fn := func(context.Context, string, any) {} return test{ - name: "set succuess when f is not nil", + name: "set success when f is not nil", args: args{ f: fn, }, @@ -214,7 +214,7 @@ func TestWithExpiredHook(t *testing.T) { }(), func() test { return test{ - name: "set succuess when fn is nil", + name: "set success when fn is nil", want: want{ want: new(T), }, @@ -272,7 +272,7 @@ func TestWithExpireDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -281,7 +281,7 @@ func TestWithExpireDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, @@ -343,7 +343,7 @@ func TestWithExpireCheckDuration(t *testing.T) { tests := []test{ { - name: "set succuess when dur is 0", + name: "set success when dur is 0", args: args{ dur: 0, }, @@ -352,7 +352,7 @@ func TestWithExpireCheckDuration(t *testing.T) { }, }, { - name: "set succuess when dur is not 0", + name: "set success when dur is not 0", args: args{ dur: 10, }, diff --git a/internal/cache/option.go b/internal/cache/option.go index 6e264ea461..bd4ef7d705 100644 --- a/internal/cache/option.go +++ b/internal/cache/option.go @@ -36,7 +36,7 @@ func defaultOptions[V any]() []Option[V] { } // WithExpiredHook returns Option after set expiredHook when f is not nil. -func WithExpiredHook[V any](f func(context.Context, string)) Option[V] { +func WithExpiredHook[V any](f func(context.Context, string, V)) Option[V] { return func(c *cache[V]) { if f != nil { c.expiredHook = f @@ -55,7 +55,7 @@ func WithType[V any](mo string) Option[V] { } } -// WithExpireDuration returns Option after set expireDur when dur is cprrect param. +// WithExpireDuration returns Option after set expireDur when dur is correct param. 
func WithExpireDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { @@ -69,7 +69,7 @@ func WithExpireDuration[V any](dur string) Option[V] { } } -// WithExpireCheckDuration returns Option after set expireCheckDur when dur is cprrect param. +// WithExpireCheckDuration returns Option after set expireCheckDur when dur is correct param. func WithExpireCheckDuration[V any](dur string) Option[V] { return func(c *cache[V]) { if len(dur) == 0 { diff --git a/internal/cache/option_test.go b/internal/cache/option_test.go index 1f5df6105f..e9a4060f81 100644 --- a/internal/cache/option_test.go +++ b/internal/cache/option_test.go @@ -30,7 +30,7 @@ import ( func TestWithExpiredHook(t *testing.T) { type args struct { - f func(context.Context, string) + f func(context.Context, string, any) } type want struct { want *cache[any] @@ -51,7 +51,7 @@ func TestWithExpiredHook(t *testing.T) { } tests := []test{ func() test { - fn := func(context.Context, string) {} + fn := func(context.Context, string, any) {} return test{ name: "set success when f is not nil", args: args{ @@ -93,7 +93,7 @@ func TestWithExpiredHook(t *testing.T) { } got := new(cache[any]) - opts := WithExpiredHook[any](test.args.f) + opts := WithExpiredHook(test.args.f) opts(got) if err := checkFunc(test.want, got); err != nil { tt.Errorf("error = %v", err) diff --git a/internal/circuitbreaker/breaker.go b/internal/circuitbreaker/breaker.go index e20f9200f3..c064bfbf6a 100644 --- a/internal/circuitbreaker/breaker.go +++ b/internal/circuitbreaker/breaker.go @@ -35,7 +35,7 @@ type breaker struct { minSamples int64 openTimeout time.Duration openExp int64 // unix time - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 // unix time } @@ -172,7 +172,7 @@ func (b *breaker) currentState() State { func (b *breaker) reset() { atomic.StoreInt32(&b.tripped, 0) atomic.StoreInt64(&b.openExp, 0) - atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.cloedRefreshTimeout).UnixNano()) + atomic.StoreInt64(&b.closedRefreshExp, time.Now().Add(b.closedRefreshTimeout).UnixNano()) b.count.reset() } diff --git a/internal/circuitbreaker/breaker_test.go b/internal/circuitbreaker/breaker_test.go index 83850a1f39..c18ddddf51 100644 --- a/internal/circuitbreaker/breaker_test.go +++ b/internal/circuitbreaker/breaker_test.go @@ -35,7 +35,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct { @@ -162,7 +162,7 @@ func Test_breaker_isReady(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } @@ -186,7 +186,7 @@ func Test_breaker_success(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -281,7 +281,7 @@ func Test_breaker_success(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -308,7 +308,7 @@ func 
Test_breaker_fail(t *testing.T) { minSamples int64 openTimeout time.Duration openExp int64 - cloedRefreshTimeout time.Duration + closedRefreshTimeout time.Duration closedRefreshExp int64 } type want struct{} @@ -409,7 +409,7 @@ func Test_breaker_fail(t *testing.T) { t.Errorf("state changed: %d", b.tripped) } if total := b.count.Total(); total == 0 { - t.Errorf("count reseted: %d", total) + t.Errorf("count resetted: %d", total) } }, } @@ -439,7 +439,7 @@ func Test_breaker_fail(t *testing.T) { minSamples: test.fields.minSamples, openTimeout: test.fields.openTimeout, openExp: test.fields.openExp, - cloedRefreshTimeout: test.fields.cloedRefreshTimeout, + closedRefreshTimeout: test.fields.closedRefreshTimeout, closedRefreshExp: test.fields.closedRefreshExp, } if test.afterFunc != nil { @@ -564,7 +564,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -613,7 +613,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -647,7 +647,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -689,7 +689,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -713,7 +713,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -749,7 +749,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -779,7 +779,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -821,7 +821,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -845,7 +845,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -876,7 +876,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -906,7 +906,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -948,7 
+948,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -972,7 +972,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct{} @@ -1003,7 +1003,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1033,7 +1033,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1075,7 +1075,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // @@ -1099,7 +1099,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples int64 // openTimeout time.Duration // openExp int64 -// cloedRefreshTimeout time.Duration +// closedRefreshTimeout time.Duration // closedRefreshExp int64 // } // type want struct { @@ -1135,7 +1135,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1165,7 +1165,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples:0, // openTimeout:nil, // openExp:0, -// cloedRefreshTimeout:nil, +// closedRefreshTimeout:nil, // closedRefreshExp:0, // }, // want: want{}, @@ -1207,7 +1207,7 @@ func Test_breaker_fail(t *testing.T) { // minSamples: test.fields.minSamples, // openTimeout: test.fields.openTimeout, // openExp: test.fields.openExp, -// cloedRefreshTimeout: test.fields.cloedRefreshTimeout, +// closedRefreshTimeout: test.fields.closedRefreshTimeout, // closedRefreshExp: test.fields.closedRefreshExp, // } // diff --git a/internal/circuitbreaker/options.go b/internal/circuitbreaker/options.go index 058f76ffcc..a62038e7b8 100644 --- a/internal/circuitbreaker/options.go +++ b/internal/circuitbreaker/options.go @@ -131,7 +131,7 @@ func WithClosedRefreshTimeout(timeout string) BreakerOption { if err != nil { return errors.NewErrInvalidOption("closedRefreshTimeout", timeout, err) } - b.cloedRefreshTimeout = d + b.closedRefreshTimeout = d return nil } } diff --git a/internal/client/v1/client/agent/core/client.go b/internal/client/v1/client/agent/core/client.go index 4b514a187e..9fa0e548c2 100644 --- a/internal/client/v1/client/agent/core/client.go +++ b/internal/client/v1/client/agent/core/client.go @@ -105,7 +105,7 @@ func (c *agentClient) CreateIndex( _, err := c.c.RoundRobin(ctx, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption, ) (any, error) { - return agent.NewAgentClient(conn).CreateIndex(ctx, req, copts...) + return NewAgentClient(conn).CreateIndex(ctx, req, copts...) 
}) return nil, err } @@ -122,7 +122,7 @@ func (c *agentClient) SaveIndex( _, err := c.c.RoundRobin(ctx, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption, ) (any, error) { - return agent.NewAgentClient(conn).SaveIndex(ctx, new(client.Empty), copts...) + return NewAgentClient(conn).SaveIndex(ctx, new(client.Empty), copts...) }) return nil, err } @@ -139,7 +139,7 @@ func (c *agentClient) CreateAndSaveIndex( _, err := c.c.RoundRobin(ctx, func(ctx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption, ) (any, error) { - return agent.NewAgentClient(conn).CreateAndSaveIndex(ctx, req, copts...) + return NewAgentClient(conn).CreateAndSaveIndex(ctx, req, copts...) }) return nil, err } diff --git a/internal/client/v1/client/discoverer/discover.go b/internal/client/v1/client/discoverer/discover.go index bc38107b8a..aa85a3368c 100644 --- a/internal/client/v1/client/discoverer/discover.go +++ b/internal/client/v1/client/discoverer/discover.go @@ -18,8 +18,10 @@ package discoverer import ( + "cmp" "context" "reflect" + "slices" "sync/atomic" "time" @@ -96,16 +98,16 @@ func (c *client) Start(ctx context.Context) (<-chan error, error) { } } - ech := make(chan error, 100) - addrs, err := c.dnsDiscovery(ctx, ech) - if err != nil { - close(ech) - return nil, err + addrs, err := c.updateDiscoveryInfo(ctx) + if err != nil || len(addrs) == 0 { + addrs, err = c.dnsDiscovery(ctx) + if err != nil { + return nil, err + } } - c.addrs.Store(&addrs) var aech <-chan error - if c.autoconn { + if c.client == nil { c.client = grpc.New( append( c.opts, @@ -113,23 +115,38 @@ func (c *client) Start(ctx context.Context) (<-chan error, error) { grpc.WithErrGroup(c.eg), )..., ) - if c.client != nil { - aech, err = c.client.StartConnectionMonitor(ctx) + aech, err = c.client.StartConnectionMonitor(ctx) + if err != nil { + return nil, err + } + for _, addr := range addrs { + if c.onConnect != nil { + err = c.onConnect(ctx, c, addr) + if err != nil { + return nil, err + } + } + } + } else { + for _, addr := range addrs { + err = c.connect(ctx, addr) if err != nil { - close(ech) return nil, err } } + aech, err = c.client.StartConnectionMonitor(ctx) } + if err != nil { + return nil, err + } + c.addrs.Store(&addrs) - err = c.discover(ctx, ech) + err = c.discover(ctx) if err != nil { - close(ech) return nil, errors.Join(c.dscClient.Close(ctx), err) } - + ech := make(chan error, 100) c.eg.Go(safety.RecoverFunc(func() (err error) { - defer close(ech) dt := time.NewTicker(c.dscDur) defer dt.Stop() finalize := func() (err error) { @@ -158,7 +175,7 @@ func (c *client) Start(ctx context.Context) (<-chan error, error) { case err = <-aech: case err = <-rrech: case <-dt.C: - err = c.discover(ctx, ech) + err = c.discover(ctx) } if err != nil { log.Error(err) @@ -177,14 +194,11 @@ func (c *client) Start(ctx context.Context) (<-chan error, error) { func (c *client) GetAddrs(ctx context.Context) (addrs []string) { a := c.addrs.Load() if a == nil { - ips, err := net.DefaultResolver.LookupIPAddr(ctx, c.dns) + var err error + addrs, err = c.dnsDiscovery(ctx) if err != nil { return nil } - addrs = make([]string, 0, len(ips)) - for _, ip := range ips { - addrs = append(addrs, ip.String()) - } } else { addrs = *a } @@ -217,7 +231,7 @@ func (c *client) GetReadClient() grpc.Client { func (c *client) connect(ctx context.Context, addr string) (err error) { if c.autoconn && c.client != nil { - _, err = c.client.Connect(ctx, addr) + _, err = c.client.Connect(ctx, addr, c.client.GetDialOption()...) 
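// For illustration: the Connect call above now forwards the managed client's
// own dial options via c.client.GetDialOption(), presumably so that
// connections opened through discovery use the same options as those opened
// by the client's connection monitor.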
if err != nil { return err } @@ -238,7 +252,7 @@ func (c *client) disconnect(ctx context.Context, addr string) (err error) { return } -func (c *client) dnsDiscovery(ctx context.Context, ech chan<- error) (addrs []string, err error) { +func (c *client) dnsDiscovery(ctx context.Context) (addrs []string, err error) { ips, err := net.DefaultResolver.LookupIPAddr(ctx, c.dns) if err != nil || len(ips) == 0 { return nil, errors.ErrAddrCouldNotDiscover(err, c.dns) @@ -249,7 +263,6 @@ func (c *client) dnsDiscovery(ctx context.Context, ech chan<- error) (addrs []st addr := net.JoinHostPort(ip.String(), uint16(c.port)) if err = c.connect(ctx, addr); err != nil { log.Debugf("dns discovery connect for addr = %s from dns = %s failed %v", addr, c.dns, err) - ech <- err } else { log.Debugf("dns discovery connect for addr = %s from dns = %s succeeded", addr, c.dns) addrs = append(addrs, addr) @@ -264,7 +277,7 @@ func (c *client) dnsDiscovery(ctx context.Context, ech chan<- error) (addrs []st return addrs, nil } -func (c *client) discover(ctx context.Context, ech chan<- error) (err error) { +func (c *client) discover(ctx context.Context) (err error) { if c.dscClient == nil || (c.autoconn && c.client == nil) { return errors.ErrGRPCClientNotFound } @@ -272,7 +285,7 @@ func (c *client) discover(ctx context.Context, ech chan<- error) (err error) { var connected []string if bo := c.client.GetBackoff(); bo != nil { _, err = bo.Do(ctx, func(ctx context.Context) (any, bool, error) { - connected, err = c.updateDiscoveryInfo(ctx, ech) + connected, err = c.updateDiscoveryInfo(ctx) if err != nil { if !errors.Is(err, errors.ErrGRPCClientNotFound) && !errors.Is(err, errors.ErrGRPCClientConnNotFound("*")) { @@ -283,11 +296,11 @@ func (c *client) discover(ctx context.Context, ech chan<- error) (err error) { return nil, false, nil }) } else { - connected, err = c.updateDiscoveryInfo(ctx, ech) + connected, err = c.updateDiscoveryInfo(ctx) } if err != nil { log.Warnf("failed to discover addrs from discoverer API, error: %v,\ttrying to dns discovery from %s...", err, c.dns) - connected, err = c.dnsDiscovery(ctx, ech) + connected, err = c.dnsDiscovery(ctx) if err != nil { return err } @@ -295,12 +308,10 @@ func (c *client) discover(ctx context.Context, ech chan<- error) (err error) { oldAddrs := c.GetAddrs(ctx) c.addrs.Store(&connected) - return c.disconnectOldAddrs(ctx, oldAddrs, connected, ech) + return c.disconnectOldAddrs(ctx, oldAddrs, connected) } -func (c *client) updateDiscoveryInfo( - ctx context.Context, ech chan<- error, -) (connected []string, err error) { +func (c *client) updateDiscoveryInfo(ctx context.Context) (connected []string, err error) { nodes, err := c.discoverNodes(ctx) if err != nil { log.Warnf("error detected when discovering nodes,\terrors: %v", err) @@ -310,7 +321,7 @@ func (c *client) updateDiscoveryInfo( log.Warn("no nodes found") return nil, errors.ErrNodeNotFound("all") } - connected, err = c.discoverAddrs(ctx, nodes, ech) + connected, err = c.discoverAddrs(ctx, nodes) if err != nil { return nil, err } @@ -343,19 +354,39 @@ func (c *client) discoverNodes(ctx context.Context) (nodes *payload.Info_Nodes, } return nodes, nil }) - return nodes, err + if err != nil { + return nil, err + } + slices.SortFunc(nodes.Nodes, func(left, right *payload.Info_Node) int { + if left.GetMemory() == nil || right.GetMemory() == nil { + return 0 // Default comparison value; adjust as needed. 
+ } + return cmp.Compare(left.GetMemory().GetUsage(), right.GetMemory().GetUsage()) + }) + return nodes, nil } func (c *client) discoverAddrs( - ctx context.Context, nodes *payload.Info_Nodes, ech chan<- error, + ctx context.Context, nodes *payload.Info_Nodes, ) (addrs []string, err error) { + if nodes == nil { + return nil, errors.ErrAddrCouldNotDiscover(err, c.dns) + } maxPodLen := 0 podLength := 0 - for _, node := range nodes.GetNodes() { - l := len(node.GetPods().GetPods()) - podLength += l - if l > maxPodLen { - maxPodLen = l + for i, node := range nodes.GetNodes() { + if node != nil && node.GetPods() != nil && node.GetPods().GetPods() != nil { + l := len(node.GetPods().GetPods()) + podLength += l + if l > maxPodLen { + maxPodLen = l + } + slices.SortFunc(nodes.Nodes[i].Pods.Pods, func(left, right *payload.Info_Pod) int { + if left.GetMemory() == nil || right.GetMemory() == nil { + return 0 // Default comparison value; adjust as needed. + } + return cmp.Compare(left.GetMemory().GetUsage(), right.GetMemory().GetUsage()) + }) } } addrs = make([]string, 0, podLength) @@ -371,15 +402,12 @@ func (c *client) discoverAddrs( len(node.GetPods().GetPods()[i].GetIp()) != 0 { addr := net.JoinHostPort(node.GetPods().GetPods()[i].GetIp(), uint16(c.port)) if err = c.connect(ctx, addr); err != nil { - select { - case <-ctx.Done(): - return nil, ctx.Err() - case ech <- errors.ErrAddrCouldNotDiscover(err, addr): - } + log.Debugf("resource based discovery connect from discoverer API for addr = %s failed %v", addr, errors.ErrAddrCouldNotDiscover(err, addr)) err = nil } else { addrs = append(addrs, addr) } + } } } @@ -388,7 +416,7 @@ func (c *client) discoverAddrs( } func (c *client) disconnectOldAddrs( - ctx context.Context, oldAddrs, connectedAddrs []string, ech chan<- error, + ctx context.Context, oldAddrs, connectedAddrs []string, ) (err error) { if !c.autoconn { return nil @@ -404,7 +432,7 @@ func (c *client) disconnectOldAddrs( c.eg.Go(safety.RecoverFunc(func() error { err = c.disconnect(ctx, old) if err != nil { - ech <- err + log.Error(err) } return nil })) @@ -420,22 +448,12 @@ func (c *client) disconnectOldAddrs( if !ok { err = c.disconnect(ctx, addr) if err != nil { - select { - case <-ctx.Done(): - return errors.Join(ctx.Err(), err) - case ech <- err: - return err - } + return err } } return nil }); err != nil { - select { - case <-ctx.Done(): - return errors.Join(ctx.Err(), err) - case ech <- err: - return err - } + log.Error(err) } } return nil diff --git a/internal/client/v1/client/vald/vald.go b/internal/client/v1/client/vald/vald.go index b08ce4b1af..d79a7511a6 100644 --- a/internal/client/v1/client/vald/vald.go +++ b/internal/client/v1/client/vald/vald.go @@ -499,6 +499,28 @@ func (c *client) MultiUpdate( return res, nil } +func (c *client) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/client/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + _, err = c.c.RoundRobin(ctx, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + res, err = vald.NewValdClient(conn).UpdateTimestamp(ctx, in, append(copts, opts...)...) 
+ return nil, err + }) + if err != nil { + return nil, err + } + return res, nil +} + func (c *client) Upsert( ctx context.Context, in *payload.Upsert_Request, opts ...grpc.CallOption, ) (res *payload.Object_Location, err error) { @@ -1088,6 +1110,18 @@ func (c *singleClient) Update( return c.vc.Update(ctx, in, opts...) } +func (c *singleClient) UpdateTimestamp( + ctx context.Context, in *payload.Update_TimestampRequest, opts ...grpc.CallOption, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "internal/singleClient/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + return c.vc.UpdateTimestamp(ctx, in, opts...) +} + func (c *singleClient) StreamUpdate( ctx context.Context, opts ...grpc.CallOption, ) (res vald.Update_StreamUpdateClient, err error) { diff --git a/internal/client/v1/client/vald/vald_test.go b/internal/client/v1/client/vald/vald_test.go index c805a9b581..b855e9e1cf 100644 --- a/internal/client/v1/client/vald/vald_test.go +++ b/internal/client/v1/client/vald/vald_test.go @@ -2603,6 +2603,118 @@ package vald // } // } // +// func Test_client_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Update_TimestampRequest +// opts []grpc.CallOption +// } +// type fields struct { +// addrs []string +// c grpc.Client +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &client{ +// addrs: test.fields.addrs, +// c: test.fields.c, +// } +// +// gotRes, err := c.UpdateTimestamp(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_client_Upsert(t *testing.T) { // type args struct { // ctx context.Context @@ -4274,6 +4386,118 @@ package vald // } // } // +// func Test_client_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Empty +// opts []grpc.CallOption +// } +// type fields struct { +// addrs []string +// c grpc.Client +// } +// type want struct { +// wantRes *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// addrs:nil, +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &client{ +// addrs: test.fields.addrs, +// c: test.fields.c, +// } +// +// gotRes, err := c.IndexProperty(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_client_GetTimestamp(t *testing.T) { // type args struct { // ctx context.Context @@ -6495,6 +6719,114 @@ package vald // } // } // +// func Test_singleClient_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Update_TimestampRequest +// opts []grpc.CallOption +// } +// type fields struct { +// vc vald.Client +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &singleClient{ +// vc: test.fields.vc, +// } +// +// gotRes, err := c.UpdateTimestamp(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_singleClient_StreamUpdate(t *testing.T) { // type args struct { // ctx context.Context @@ -8319,6 +8651,114 @@ package vald // } // } // +// func Test_singleClient_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in *payload.Empty +// opts []grpc.CallOption +// } +// type fields struct { +// vc vald.Client +// } +// type want struct { +// wantRes *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in:nil, +// opts:nil, +// }, +// fields: fields { +// vc:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &singleClient{ +// vc: test.fields.vc, +// } +// +// gotRes, err := c.IndexProperty(test.args.ctx, test.args.in, test.args.opts...) 
+// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_singleClient_GetTimestamp(t *testing.T) { // type args struct { // ctx context.Context diff --git a/internal/compress/gob_test.go b/internal/compress/gob_test.go index 4efb42e216..9375ba9745 100644 --- a/internal/compress/gob_test.go +++ b/internal/compress/gob_test.go @@ -391,7 +391,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src io.ReadCloser } type fields struct { - transcodr gob.Transcoder + transcoder gob.Transcoder } type want struct { want io.ReadCloser @@ -425,7 +425,7 @@ func Test_gobCompressor_Reader(t *testing.T) { src: rc, }, fields: fields{ - transcodr: &gob.MockTranscoder{ + transcoder: &gob.MockTranscoder{ NewDecoderFunc: func(r io.Reader) gob.Decoder { return dec }, @@ -457,7 +457,7 @@ func Test_gobCompressor_Reader(t *testing.T) { checkFunc = defaultCheckFunc } g := &gobCompressor{ - transcoder: test.fields.transcodr, + transcoder: test.fields.transcoder, } got, err := g.Reader(test.args.src) diff --git a/internal/compress/lz4_test.go b/internal/compress/lz4_test.go index 5a8138b3b7..0000b473bb 100644 --- a/internal/compress/lz4_test.go +++ b/internal/compress/lz4_test.go @@ -476,14 +476,14 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, }, { - name: "returns (nil, error) when decompresse fails", + name: "returns (nil, error) when decompress fails", args: args{ bs: []byte("vdaas/vald"), }, fields: fields{ gobc: &MockCompressor{ DecompressVectorFunc: func(bytes []byte) (vector []float32, err error) { - return nil, errors.New("decompresse err") + return nil, errors.New("decompress err") }, }, compressionLevel: 0, @@ -497,7 +497,7 @@ func Test_lz4Compressor_DecompressVector(t *testing.T) { }, want: want{ want: nil, - err: errors.New("decompresse err"), + err: errors.New("decompress err"), }, }, } @@ -653,7 +653,7 @@ func Test_lz4Compressor_Writer(t *testing.T) { w = new(lz4.MockWriter) ) return test{ - name: "returns (io.WriteCloser, nil) when no erro occurs", + name: "returns (io.WriteCloser, nil) when no error occurs", args: args{ dst: dst, }, diff --git a/internal/config/cassandra_test.go b/internal/config/cassandra_test.go index d79b372abf..74a3f99e37 100644 --- a/internal/config/cassandra_test.go +++ b/internal/config/cassandra_test.go @@ -283,7 +283,7 @@ func TestCassandra_Bind(t *testing.T) { key := "CASSANDRA_BIND_PASSWORD" val := "cassandra_password" return test{ - name: "return Cassandra struct when Password is set via the envirionment value", + name: "return Cassandra struct when Password is set via the environment value", fields: fields{ Password: "_" + key + "_", }, diff --git a/internal/config/faiss_test.go b/internal/config/faiss_test.go index 02bac18a25..1887d758ed 100644 --- a/internal/config/faiss_test.go +++ b/internal/config/faiss_test.go @@ -22,6 +22,7 @@ package config // Nlist int // M int // NbitsPerIdx int +// MethodType string // MetricType string // EnableInMemoryMode bool // AutoIndexCheckDuration string @@ -65,6 +66,7 @@ package config // Nlist:0, // M:0, // NbitsPerIdx:0, +// MethodType:"", // MetricType:"", // EnableInMemoryMode:false, // AutoIndexCheckDuration:"", @@ -102,6 +104,7 @@ package config // Nlist:0, // M:0, // NbitsPerIdx:0, +// MethodType:"", // MetricType:"", // EnableInMemoryMode:false, // AutoIndexCheckDuration:"", @@ -151,6 +154,7 @@ package config // Nlist: test.fields.Nlist, // M: test.fields.M, // NbitsPerIdx: test.fields.NbitsPerIdx, +// MethodType: 
test.fields.MethodType, // MetricType: test.fields.MetricType, // EnableInMemoryMode: test.fields.EnableInMemoryMode, // AutoIndexCheckDuration: test.fields.AutoIndexCheckDuration, diff --git a/internal/config/grpc.go b/internal/config/grpc.go index 07517f0cd8..4279bdc001 100644 --- a/internal/config/grpc.go +++ b/internal/config/grpc.go @@ -39,30 +39,38 @@ type GRPCClient struct { // CallOption represents the configurations for call option. type CallOption struct { - WaitForReady bool `json:"wait_for_ready" yaml:"wait_for_ready"` - MaxRetryRPCBufferSize int `json:"max_retry_rpc_buffer_size" yaml:"max_retry_rpc_buffer_size"` - MaxRecvMsgSize int `json:"max_recv_msg_size" yaml:"max_recv_msg_size"` - MaxSendMsgSize int `json:"max_send_msg_size" yaml:"max_send_msg_size"` + WaitForReady bool `json:"wait_for_ready" yaml:"wait_for_ready"` + MaxRetryRPCBufferSize int `json:"max_retry_rpc_buffer_size" yaml:"max_retry_rpc_buffer_size"` + MaxRecvMsgSize int `json:"max_recv_msg_size" yaml:"max_recv_msg_size"` + MaxSendMsgSize int `json:"max_send_msg_size" yaml:"max_send_msg_size"` + ContentSubtype string `json:"content_subtype" yaml:"content_subtype"` } // DialOption represents the configurations for dial option. type DialOption struct { - WriteBufferSize int `json:"write_buffer_size" yaml:"write_buffer_size"` - ReadBufferSize int `json:"read_buffer_size" yaml:"read_buffer_size"` - InitialWindowSize int `json:"initial_window_size" yaml:"initial_window_size"` - InitialConnectionWindowSize int `json:"initial_connection_window_size" yaml:"initial_connection_window_size"` - MaxMsgSize int `json:"max_msg_size" yaml:"max_msg_size"` - BackoffMaxDelay string `json:"backoff_max_delay" yaml:"backoff_max_delay"` - BackoffBaseDelay string `json:"backoff_base_delay" yaml:"backoff_base_delay"` - BackoffJitter float64 `json:"backoff_jitter" yaml:"backoff_jitter"` - BackoffMultiplier float64 `json:"backoff_multiplier" yaml:"backoff_multiplier"` - MinimumConnectionTimeout string `json:"min_connection_timeout" yaml:"min_connection_timeout"` - EnableBackoff bool `json:"enable_backoff" yaml:"enable_backoff"` - Insecure bool `json:"insecure" yaml:"insecure"` - Timeout string `json:"timeout" yaml:"timeout"` - Interceptors []string `json:"interceptors,omitempty" yaml:"interceptors"` - Net *Net `json:"net" yaml:"net"` - Keepalive *GRPCClientKeepalive `json:"keepalive" yaml:"keepalive"` + DisableRetry bool `json:"disable_retry,omitempty" yaml:"disable_retry"` + EnableBackoff bool `json:"enable_backoff,omitempty" yaml:"enable_backoff"` + Insecure bool `json:"insecure,omitempty" yaml:"insecure"` + SharedWriteBuffer bool `json:"shared_write_buffer,omitempty" yaml:"shared_write_buffer"` + InitialConnectionWindowSize int32 `json:"initial_connection_window_size,omitempty" yaml:"initial_connection_window_size"` + InitialWindowSize int32 `json:"initial_window_size,omitempty" yaml:"initial_window_size"` + MaxHeaderListSize uint32 `json:"max_header_list_size,omitempty" yaml:"max_header_list_size"` + MaxCallAttempts int `json:"max_call_attempts,omitempty" yaml:"max_call_attempts"` + MaxMsgSize int `json:"max_msg_size,omitempty" yaml:"max_msg_size"` + ReadBufferSize int `json:"read_buffer_size,omitempty" yaml:"read_buffer_size"` + WriteBufferSize int `json:"write_buffer_size,omitempty" yaml:"write_buffer_size"` + BackoffJitter float64 `json:"backoff_jitter,omitempty" yaml:"backoff_jitter"` + BackoffMultiplier float64 `json:"backoff_multiplier,omitempty" yaml:"backoff_multiplier"` + Authority string `json:"authority,omitempty" 
yaml:"authority"` + BackoffBaseDelay string `json:"backoff_base_delay,omitempty" yaml:"backoff_base_delay"` + BackoffMaxDelay string `json:"backoff_max_delay,omitempty" yaml:"backoff_max_delay"` + IdleTimeout string `json:"idle_timeout,omitempty" yaml:"idle_timeout"` + MinimumConnectionTimeout string `json:"min_connection_timeout,omitempty" yaml:"min_connection_timeout"` + Timeout string `json:"timeout,omitempty" yaml:"timeout"` + UserAgent string `json:"user_agent,omitempty" yaml:"user_agent"` + Interceptors []string `json:"interceptors,omitempty" yaml:"interceptors"` + Net *Net `json:"net,omitempty" yaml:"net"` + Keepalive *GRPCClientKeepalive `json:"keepalive,omitempty" yaml:"keepalive"` } // ConnectionPool represents the configurations for connection pool. @@ -144,14 +152,20 @@ func (g *GRPCClientKeepalive) Bind() *GRPCClientKeepalive { // Bind binds the actual data from the CallOption receiver fields. func (c *CallOption) Bind() *CallOption { + c.ContentSubtype = GetActualValue(c.ContentSubtype) return c } // Bind binds the actual data from the DialOption receiver fields. func (d *DialOption) Bind() *DialOption { + d.Authority = GetActualValue(d.Authority) + d.BackoffBaseDelay = GetActualValue(d.BackoffBaseDelay) d.BackoffMaxDelay = GetActualValue(d.BackoffMaxDelay) - d.Timeout = GetActualValue(d.Timeout) + d.IdleTimeout = GetActualValue(d.IdleTimeout) d.Interceptors = GetActualValues(d.Interceptors) + d.MinimumConnectionTimeout = GetActualValue(d.MinimumConnectionTimeout) + d.Timeout = GetActualValue(d.Timeout) + d.UserAgent = GetActualValue(d.UserAgent) return d } @@ -216,24 +230,31 @@ func (g *GRPCClient) Opts() ([]grpc.Option, error) { if g.CallOption != nil { opts = append(opts, - grpc.WithWaitForReady(g.CallOption.WaitForReady), - grpc.WithMaxRetryRPCBufferSize(g.CallOption.MaxRetryRPCBufferSize), + grpc.WithCallContentSubtype(g.CallOption.ContentSubtype), grpc.WithMaxRecvMsgSize(g.CallOption.MaxRecvMsgSize), + grpc.WithMaxRetryRPCBufferSize(g.CallOption.MaxRetryRPCBufferSize), grpc.WithMaxSendMsgSize(g.CallOption.MaxSendMsgSize), + grpc.WithWaitForReady(g.CallOption.WaitForReady), ) } if g.DialOption != nil { opts = append(opts, - grpc.WithWriteBufferSize(g.DialOption.WriteBufferSize), - grpc.WithReadBufferSize(g.DialOption.WriteBufferSize), - grpc.WithInitialWindowSize(g.DialOption.InitialWindowSize), - grpc.WithInitialConnectionWindowSize(g.DialOption.InitialWindowSize), - grpc.WithMaxMsgSize(g.DialOption.MaxMsgSize), - grpc.WithInsecure(g.DialOption.Insecure), - grpc.WithBackoffMaxDelay(g.DialOption.BackoffMaxDelay), + grpc.WithAuthority(g.DialOption.Authority), grpc.WithBackoffMaxDelay(g.DialOption.BackoffMaxDelay), grpc.WithClientInterceptors(g.DialOption.Interceptors...), + grpc.WithDisableRetry(g.DialOption.DisableRetry), + grpc.WithIdleTimeout(g.DialOption.IdleTimeout), + grpc.WithInitialConnectionWindowSize(g.DialOption.InitialConnectionWindowSize), + grpc.WithInitialWindowSize(g.DialOption.InitialWindowSize), + grpc.WithInsecure(g.DialOption.Insecure), + grpc.WithMaxCallAttempts(g.DialOption.MaxCallAttempts), + grpc.WithMaxHeaderListSize(g.DialOption.MaxHeaderListSize), + grpc.WithMaxMsgSize(g.DialOption.MaxMsgSize), + grpc.WithReadBufferSize(g.DialOption.ReadBufferSize), + grpc.WithSharedWriteBuffer(g.DialOption.SharedWriteBuffer), + grpc.WithUserAgent(g.DialOption.UserAgent), + grpc.WithWriteBufferSize(g.DialOption.WriteBufferSize), ) if g.DialOption.Net != nil && @@ -251,8 +272,12 @@ func (g *GRPCClient) Opts() ([]grpc.Option, error) { if err != nil { return nil, 
err } + network := g.DialOption.Net.Network + if network == "" { + network = net.TCP.String() + } opts = append(opts, - grpc.WithDialer(der), + grpc.WithDialer(network, der), ) } diff --git a/internal/config/grpc_test.go b/internal/config/grpc_test.go index d61b9d70ad..df3ef76c45 100644 --- a/internal/config/grpc_test.go +++ b/internal/config/grpc_test.go @@ -504,8 +504,8 @@ func TestDialOption_Bind(t *testing.T) { type fields struct { WriteBufferSize int ReadBufferSize int - InitialWindowSize int - InitialConnectionWindowSize int + InitialWindowSize int32 + InitialConnectionWindowSize int32 MaxMsgSize int BackoffMaxDelay string BackoffBaseDelay string @@ -539,8 +539,8 @@ func TestDialOption_Bind(t *testing.T) { func() test { writeBufferSize := 10000 readBufferSize := 10000 - initialWindowSize := 100 - initialConnectionWindowSize := 100 + initialWindowSize := int32(100) + initialConnectionWindowSize := int32(100) maxMsgSize := 1000 backoffMaxDelay := "3m" backoffBaseDelay := "1m" @@ -706,7 +706,7 @@ func TestGRPCClient_Opts(t *testing.T) { } tests := []test{ { - name: "return 25 grpc.Option and nil error when all parameters are set", + name: "return 32 grpc.Option and nil error when all parameters are set", fields: fields{ Addrs: []string{ "10.40.3.342", @@ -778,7 +778,7 @@ func TestGRPCClient_Opts(t *testing.T) { }, }, want: want{ - want: make([]grpc.Option, 25), + want: make([]grpc.Option, 32), }, }, { diff --git a/internal/config/index_deleter.go b/internal/config/index_deleter.go new file mode 100644 index 0000000000..78d6e5786a --- /dev/null +++ b/internal/config/index_deleter.go @@ -0,0 +1,61 @@ +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package config + +// IndexDeleter represents the configurations for index deletion. +type IndexDeleter struct { + // IndexID represent target delete ID + IndexID string `json:"index_id" yaml:"index_id"` + + // AgentPort represent agent port number + AgentPort int `json:"agent_port" yaml:"agent_port"` + + // AgentName represent agents meta_name for service discovery + AgentName string `json:"agent_name" yaml:"agent_name"` + + // AgentNamespace represent agent namespace location + AgentNamespace string `json:"agent_namespace" yaml:"agent_namespace"` + + // AgentDNS represent agents dns A record for service discovery + AgentDNS string `json:"agent_dns" yaml:"agent_dns"` + + // NodeName represents node name + NodeName string `json:"node_name" yaml:"node_name"` + + // Concurrency represents indexing concurrency. + Concurrency int `json:"concurrency" yaml:"concurrency"` + + // DeletionPoolSize represents batch pool size for indexing. + DeletionPoolSize uint32 `json:"deletion_pool_size" yaml:"deletion_pool_size"` + + // TargetAddrs represents indexing target addresses. + TargetAddrs []string `json:"target_addrs" yaml:"target_addrs"` + + // Discoverer represents agent discoverer service configuration. 
+ Discoverer *DiscovererClient `json:"discoverer" yaml:"discoverer"` +} + +func (ic *IndexDeleter) Bind() *IndexDeleter { + ic.IndexID = GetActualValue(ic.IndexID) + ic.AgentName = GetActualValue(ic.AgentName) + ic.AgentNamespace = GetActualValue(ic.AgentNamespace) + ic.AgentDNS = GetActualValue(ic.AgentDNS) + ic.NodeName = GetActualValue(ic.NodeName) + ic.TargetAddrs = GetActualValues(ic.TargetAddrs) + + if ic.Discoverer != nil { + ic.Discoverer.Bind() + } + return ic +} diff --git a/internal/config/log.go b/internal/config/log.go index 291a5971ad..c79583fdfe 100644 --- a/internal/config/log.go +++ b/internal/config/log.go @@ -24,7 +24,7 @@ type Logging struct { Format string `json:"format" yaml:"format"` } -// Bind returns Logging object whose every value is field value or envirionment value. +// Bind returns Logging object whose every value is field value or environment value. func (l *Logging) Bind() *Logging { l.Logger = GetActualValue(l.Logger) l.Level = GetActualValue(l.Level) diff --git a/internal/config/net.go b/internal/config/net.go index 9b90858cd8..550cb56029 100644 --- a/internal/config/net.go +++ b/internal/config/net.go @@ -25,6 +25,7 @@ import ( // Net represents the network configuration tcp, udp, unix domain socket. type Net struct { + Network string `json:"network,omitempty" yaml:"network"` DNS *DNS `json:"dns,omitempty" yaml:"dns"` Dialer *Dialer `json:"dialer,omitempty" yaml:"dialer"` SocketOption *SocketOption `json:"socket_option,omitempty" yaml:"socket_option"` @@ -117,6 +118,7 @@ func (s *SocketOption) ToSocketFlag() control.SocketFlag { // Bind binds the actual data from the Net fields. func (t *Net) Bind() *Net { + t.Network = GetActualValue(t.Network) if t.TLS != nil { t.TLS = t.TLS.Bind() } diff --git a/internal/config/server.go b/internal/config/server.go index 8b94acd739..e588891924 100644 --- a/internal/config/server.go +++ b/internal/config/server.go @@ -21,7 +21,6 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/admin" - "github.com/vdaas/vald/internal/net/grpc/health" "github.com/vdaas/vald/internal/net/grpc/reflection" "github.com/vdaas/vald/internal/servers/server" "github.com/vdaas/vald/internal/strings" @@ -92,20 +91,25 @@ type HTTP2 struct { // GRPC represents the configuration for gPRC. 
type GRPC struct { - Keepalive *GRPCKeepalive `json:"keepalive,omitempty" yaml:"keepalive"` - ConnectionTimeout string `json:"connection_timeout,omitempty" yaml:"connection_timeout"` - Interceptors []string `json:"interceptors,omitempty" yaml:"interceptors"` - EnableReflection bool `json:"enable_reflection,omitempty" yaml:"enable_reflection"` EnableAdmin bool `json:"enable_admin,omitempty" yaml:"enable_admin"` + EnableChannelz bool `json:"enable_channelz,omitempty" yaml:"enable_channelz"` + EnableReflection bool `json:"enable_reflection,omitempty" yaml:"enable_reflection"` + SharedWriteBuffer bool `json:"shared_write_buffer,omitempty" yaml:"shared_write_buffer"` + WaitForHandlers bool `json:"wait_for_handlers,omitempty" yaml:"wait_for_handlers"` + HeaderTableSize uint32 `json:"header_table_size,omitempty" yaml:"header_table_size"` + MaxConcurrentStreams uint32 `json:"max_concurrent_streams,omitempty" yaml:"max_concurrent_streams"` + MaxHeaderListSize uint32 `json:"max_header_list_size,omitempty" yaml:"max_header_list_size"` + NumStreamWorkers uint32 `json:"num_stream_workers,omitempty" yaml:"num_stream_workers"` BidirectionalStreamConcurrency int `json:"bidirectional_stream_concurrency,omitempty" yaml:"bidirectional_stream_concurrency"` + InitialConnWindowSize int `json:"initial_conn_window_size,omitempty" yaml:"initial_conn_window_size"` + InitialWindowSize int `json:"initial_window_size,omitempty" yaml:"initial_window_size"` MaxReceiveMessageSize int `json:"max_receive_message_size,omitempty" yaml:"max_receive_message_size"` MaxSendMessageSize int `json:"max_send_message_size,omitempty" yaml:"max_send_message_size"` - InitialWindowSize int `json:"initial_window_size,omitempty" yaml:"initial_window_size"` - InitialConnWindowSize int `json:"initial_conn_window_size,omitempty" yaml:"initial_conn_window_size"` - WriteBufferSize int `json:"write_buffer_size,omitempty" yaml:"write_buffer_size"` ReadBufferSize int `json:"read_buffer_size,omitempty" yaml:"read_buffer_size"` - MaxHeaderListSize int `json:"max_header_list_size,omitempty" yaml:"max_header_list_size"` - HeaderTableSize int `json:"header_table_size,omitempty" yaml:"header_table_size"` + WriteBufferSize int `json:"write_buffer_size,omitempty" yaml:"write_buffer_size"` + ConnectionTimeout string `json:"connection_timeout,omitempty" yaml:"connection_timeout"` + Interceptors []string `json:"interceptors,omitempty" yaml:"interceptors"` + Keepalive *GRPCKeepalive `json:"keepalive,omitempty" yaml:"keepalive"` } // GRPCKeepalive represents the configuration for gRPC keep-alive. 
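A minimal usage sketch of how the reshaped client-side CallOption/DialOption fields above feed the existing Bind()/Opts() flow. All literal values below are hypothetical examples, only fields shown in the structs above are used, and the snippet assumes it is compiled inside the vald module, since internal packages cannot be imported from outside it.

// Usage sketch (assumptions: built inside the vald module; addresses and values are illustrative only).
package example

import (
	"github.com/vdaas/vald/internal/config"
	"github.com/vdaas/vald/internal/net/grpc"
)

func clientOpts() ([]grpc.Option, error) {
	cfg := &config.GRPCClient{
		Addrs: []string{"vald-lb-gateway.default.svc.cluster.local:8081"},
		CallOption: &config.CallOption{
			WaitForReady:   true,
			ContentSubtype: "proto", // wired to grpc.WithCallContentSubtype
		},
		DialOption: &config.DialOption{
			Authority:       "vald-lb-gateway",     // wired to grpc.WithAuthority
			IdleTimeout:     "1h",                  // wired to grpc.WithIdleTimeout
			MaxCallAttempts: 3,                     // wired to grpc.WithMaxCallAttempts
			UserAgent:       "vald-example-client", // wired to grpc.WithUserAgent
			Timeout:         "3s",
		},
	}
	// Bind resolves environment-variable placeholders in the string fields,
	// then Opts converts the configuration into gRPC client options.
	cfg.CallOption = cfg.CallOption.Bind()
	cfg.DialOption = cfg.DialOption.Bind()
	return cfg.Opts()
}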
@@ -292,27 +296,29 @@ func (s *Server) Opts() []server.Option { if s.GRPC != nil { opts = append(opts, server.WithServerMode(mode), - server.WithGRPCMaxReceiveMessageSize(s.GRPC.MaxReceiveMessageSize), - server.WithGRPCMaxSendMessageSize(s.GRPC.MaxSendMessageSize), - server.WithGRPCInitialWindowSize(s.GRPC.InitialWindowSize), - server.WithGRPCInitialConnWindowSize(s.GRPC.InitialConnWindowSize), - server.WithGRPCWriteBufferSize(s.GRPC.WriteBufferSize), - server.WithGRPCReadBufferSize(s.GRPC.ReadBufferSize), server.WithGRPCConnectionTimeout(s.GRPC.ConnectionTimeout), - server.WithGRPCMaxHeaderListSize(s.GRPC.MaxHeaderListSize), server.WithGRPCHeaderTableSize(s.GRPC.HeaderTableSize), + server.WithGRPCInitialConnWindowSize(s.GRPC.InitialConnWindowSize), + server.WithGRPCInitialWindowSize(s.GRPC.InitialWindowSize), server.WithGRPCInterceptors(s.GRPC.Interceptors...), - server.WithGRPCRegistFunc(func(srv *grpc.Server) { - health.Register(s.Name, srv) - }), + server.WithGRPCMaxConcurrentStreams(s.GRPC.MaxConcurrentStreams), + server.WithGRPCMaxHeaderListSize(s.GRPC.MaxHeaderListSize), + server.WithGRPCMaxReceiveMessageSize(s.GRPC.MaxReceiveMessageSize), + server.WithGRPCMaxSendMessageSize(s.GRPC.MaxSendMessageSize), + server.WithGRPCNumStreamWorkers(s.GRPC.NumStreamWorkers), + server.WithGRPCReadBufferSize(s.GRPC.ReadBufferSize), + server.WithGRPCSharedWriteBuffer(s.GRPC.SharedWriteBuffer), + server.WithGRPCWaitForHandlers(s.GRPC.WaitForHandlers), + server.WithGRPCWriteBufferSize(s.GRPC.WriteBufferSize), ) + if s.GRPC.EnableReflection { opts = append(opts, server.WithGRPCRegistFunc(func(srv *grpc.Server) { reflection.Register(srv) })) } - if s.GRPC.EnableAdmin { + if s.GRPC.EnableAdmin || s.GRPC.EnableChannelz { opts = append(opts, server.WithGRPCRegistFunc(func(srv *grpc.Server) { admin.Register(srv) diff --git a/internal/config/server_test.go b/internal/config/server_test.go index bf8f4aa4ae..e2595e9cc5 100644 --- a/internal/config/server_test.go +++ b/internal/config/server_test.go @@ -640,8 +640,8 @@ func TestGRPC_Bind(t *testing.T) { WriteBufferSize int ReadBufferSize int ConnectionTimeout string - MaxHeaderListSize int - HeaderTableSize int + MaxHeaderListSize uint32 + HeaderTableSize uint32 Interceptors []string EnableReflection bool } @@ -679,8 +679,8 @@ func TestGRPC_Bind(t *testing.T) { writeBufferSize := 3 readBufferSize := 3 connectionTimeout := "3s" - maxHeaderListSize := 5 - headerTableSize := 1 + maxHeaderListSize := uint32(5) + headerTableSize := uint32(1) interceptors := []string{ "RecoverInterceptor", } @@ -741,8 +741,8 @@ func TestGRPC_Bind(t *testing.T) { } writeBufferSize := 3 readBufferSize := 3 - maxHeaderListSize := 5 - headerTableSize := 1 + maxHeaderListSize := uint32(5) + headerTableSize := uint32(1) enableReflection := true return test{ name: "return GRPC when some parameters are set as environment value", @@ -1369,7 +1369,7 @@ func TestServer_Opts(t *testing.T) { }, }, { - name: "return 13 server.Options when NETWORK is empty, MODE is GRPC", + name: "return 31 server.Options when NETWORK is empty, MODE is GRPC", fields: fields{ Name: "vald-agent-ngt", Host: "0.0.0.0", @@ -1424,7 +1424,7 @@ func TestServer_Opts(t *testing.T) { Restart: false, }, want: want{ - want: make([]server.Option, 28), + want: make([]server.Option, 31), }, }, } diff --git a/internal/core/algorithm/faiss/faiss.go b/internal/core/algorithm/faiss/faiss.go index 2c4f3d5bd6..1c731bc44f 100644 --- a/internal/core/algorithm/faiss/faiss.go +++ b/internal/core/algorithm/faiss/faiss.go @@ -24,11 +24,11 @@ 
package faiss import "C" import ( - "sync" "unsafe" "github.com/vdaas/vald/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/sync" ) type ( diff --git a/internal/core/algorithm/faiss/option.go b/internal/core/algorithm/faiss/option.go index 62cd0b85d6..e66aecb695 100644 --- a/internal/core/algorithm/faiss/option.go +++ b/internal/core/algorithm/faiss/option.go @@ -24,10 +24,9 @@ package faiss import "C" import ( - "strings" - "github.com/vdaas/vald/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/strings" ) // Option represents the functional option for faiss. diff --git a/internal/core/algorithm/ngt/ngt.go b/internal/core/algorithm/ngt/ngt.go index 15b2ec4e6b..99dee9c347 100644 --- a/internal/core/algorithm/ngt/ngt.go +++ b/internal/core/algorithm/ngt/ngt.go @@ -35,7 +35,9 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/sync" + "github.com/vdaas/vald/internal/sync/singleflight" ) type ( @@ -85,7 +87,7 @@ type ( // GetVector returns vector stored in NGT index. GetVector(id uint) ([]float32, error) - GetGraphStatistics(m statisticsType) (stats *GraphStatistics, err error) + GetGraphStatistics(ctx context.Context, m statisticsType) (stats *GraphStatistics, err error) // GetProperty returns NGT Index Property. GetProperty() (*Property, error) @@ -114,8 +116,10 @@ type ( epl uint64 // NGT error buffer pool size limit index C.NGTIndex ospace C.NGTObjectSpace + group singleflight.Group[*GraphStatistics] mu *sync.RWMutex cmu *sync.RWMutex + smu *sync.Mutex } ngtError struct { @@ -479,7 +483,8 @@ func gen(isLoad bool, opts ...Option) (NGT, error) { ) n.mu = new(sync.RWMutex) n.cmu = new(sync.RWMutex) - + n.smu = new(sync.Mutex) + n.group = singleflight.New[*GraphStatistics]() defer func() { if err != nil { n.Close() @@ -568,6 +573,8 @@ func (n *ngt) create() (err error) { path := C.CString(n.idxPath) defer C.free(unsafe.Pointer(path)) + n.smu.Lock() + defer n.smu.Unlock() ne := n.GetErrorBuffer() if !n.inMemory { n.index = C.ngt_create_graph_and_tree(path, n.prop, ne.err) @@ -596,6 +603,8 @@ func (n *ngt) open() error { path := C.CString(n.idxPath) defer C.free(unsafe.Pointer(path)) + n.smu.Lock() + defer n.smu.Unlock() ne := n.GetErrorBuffer() n.index = C.ngt_open_index(path, ne.err) if n.index == nil { @@ -884,6 +893,8 @@ func (n *ngt) CreateIndex(poolSize uint32) error { if poolSize == 0 { poolSize = n.poolSize } + n.smu.Lock() + defer n.smu.Unlock() ne := n.GetErrorBuffer() n.lock(true) ret := C.ngt_create_index(n.index, C.uint32_t(poolSize), ne.err) @@ -901,6 +912,8 @@ func (n *ngt) SaveIndex() error { if !n.inMemory { path := C.CString(n.idxPath) defer C.free(unsafe.Pointer(path)) + n.smu.Lock() + defer n.smu.Unlock() ne := n.GetErrorBuffer() n.rLock(true) ret := C.ngt_save_index(n.index, path, ne.err) @@ -919,6 +932,8 @@ func (n *ngt) SaveIndexWithPath(idxPath string) error { if !n.inMemory && len(idxPath) != 0 { path := C.CString(idxPath) defer C.free(unsafe.Pointer(path)) + n.smu.Lock() + defer n.smu.Unlock() ne := n.GetErrorBuffer() n.rLock(true) ret := C.ngt_save_index(n.index, path, ne.err) @@ -1071,6 +1086,8 @@ func (n *ngt) rUnlock(cLock bool) { // Close NGT index. 
func (n *ngt) Close() { if n.index != nil { + n.smu.Lock() + defer n.smu.Unlock() C.ngt_close_index(n.index) n.index = nil n.prop = nil @@ -1079,36 +1096,39 @@ func (n *ngt) Close() { } func fromCGraphStatistics(cstats *C.NGTGraphStatistics) *GraphStatistics { + if cstats == nil { + return nil + } goStats := &GraphStatistics{ - NumberOfObjects: uint64(cstats.numberOfObjects), - NumberOfIndexedObjects: uint64(cstats.numberOfIndexedObjects), - SizeOfObjectRepository: uint64(cstats.sizeOfObjectRepository), - SizeOfRefinementObjectRepository: uint64(cstats.sizeOfRefinementObjectRepository), - NumberOfRemovedObjects: uint64(cstats.numberOfRemovedObjects), - NumberOfNodes: uint64(cstats.numberOfNodes), - NumberOfEdges: uint64(cstats.numberOfEdges), + C1Indegree: float64(cstats.c1Indegree), + C5Indegree: float64(cstats.c5Indegree), + C95Outdegree: float64(cstats.c95Outdegree), + C99Outdegree: float64(cstats.c99Outdegree), + MaxNumberOfIndegree: uint64(cstats.maxNumberOfIndegree), + MaxNumberOfOutdegree: uint64(cstats.maxNumberOfOutdegree), MeanEdgeLength: float64(cstats.meanEdgeLength), + MeanEdgeLengthFor10Edges: float64(cstats.meanEdgeLengthFor10Edges), + MeanIndegreeDistanceFor10Edges: float64(cstats.meanIndegreeDistanceFor10Edges), MeanNumberOfEdgesPerNode: float64(cstats.meanNumberOfEdgesPerNode), - NumberOfNodesWithoutEdges: uint64(cstats.numberOfNodesWithoutEdges), - MaxNumberOfOutdegree: uint64(cstats.maxNumberOfOutdegree), - MinNumberOfOutdegree: uint64(cstats.minNumberOfOutdegree), - NumberOfNodesWithoutIndegree: uint64(cstats.numberOfNodesWithoutIndegree), - MaxNumberOfIndegree: uint64(cstats.maxNumberOfIndegree), + MedianIndegree: int32(cstats.medianIndegree), + MedianOutdegree: int32(cstats.medianOutdegree), MinNumberOfIndegree: uint64(cstats.minNumberOfIndegree), - MeanEdgeLengthFor10Edges: float64(cstats.meanEdgeLengthFor10Edges), + MinNumberOfOutdegree: uint64(cstats.minNumberOfOutdegree), + ModeIndegree: uint64(cstats.modeIndegree), + ModeOutdegree: uint64(cstats.modeOutdegree), NodesSkippedFor10Edges: uint64(cstats.nodesSkippedFor10Edges), - MeanIndegreeDistanceFor10Edges: float64(cstats.meanIndegreeDistanceFor10Edges), NodesSkippedForIndegreeDistance: uint64(cstats.nodesSkippedForIndegreeDistance), - VarianceOfOutdegree: float64(cstats.varianceOfOutdegree), + NumberOfEdges: uint64(cstats.numberOfEdges), + NumberOfIndexedObjects: uint64(cstats.numberOfIndexedObjects), + NumberOfNodes: uint64(cstats.numberOfNodes), + NumberOfNodesWithoutEdges: uint64(cstats.numberOfNodesWithoutEdges), + NumberOfNodesWithoutIndegree: uint64(cstats.numberOfNodesWithoutIndegree), + NumberOfObjects: uint64(cstats.numberOfObjects), + NumberOfRemovedObjects: uint64(cstats.numberOfRemovedObjects), + SizeOfObjectRepository: uint64(cstats.sizeOfObjectRepository), + SizeOfRefinementObjectRepository: uint64(cstats.sizeOfRefinementObjectRepository), VarianceOfIndegree: float64(cstats.varianceOfIndegree), - MedianOutdegree: int32(cstats.medianOutdegree), - ModeOutdegree: uint64(cstats.modeOutdegree), - C95Outdegree: float64(cstats.c95Outdegree), - C99Outdegree: float64(cstats.c99Outdegree), - MedianIndegree: int32(cstats.medianIndegree), - ModeIndegree: uint64(cstats.modeIndegree), - C5Indegree: float64(cstats.c5Indegree), - C1Indegree: float64(cstats.c1Indegree), + VarianceOfOutdegree: float64(cstats.varianceOfOutdegree), Valid: bool(cstats.valid), } @@ -1139,22 +1159,53 @@ func fromCGraphStatistics(cstats *C.NGTGraphStatistics) *GraphStatistics { return goStats } -func (n *ngt) GetGraphStatistics(m 
statisticsType) (stats *GraphStatistics, err error) { - var mode rune - switch m { - case NormalStatistics: - mode = '-' - case AdditionalStatistics: - mode = 'a' - } - ne := n.GetErrorBuffer() - cstats := C.ngt_get_graph_statistics(n.index, C.char(mode), C.size_t(n.ces), ne.err) - if !cstats.valid { - return nil, n.newGoError(ne) +func (n *ngt) GetGraphStatistics( + ctx context.Context, m statisticsType, +) (stats *GraphStatistics, err error) { + return n.getGraphStatistics(ctx, m, 10) +} + +func (n *ngt) getGraphStatistics( + ctx context.Context, m statisticsType, cnt int, +) (stats *GraphStatistics, err error) { + var shared bool + stats, shared, err = n.group.Do(ctx, "GetGraphStatistics", func(context.Context) (stats *GraphStatistics, err error) { + n.smu.Lock() + defer n.smu.Unlock() + var mode rune + switch m { + case NormalStatistics: + mode = '-' + case AdditionalStatistics: + mode = 'a' + } + err = safety.RecoverFunc(func() (err error) { + ne := n.GetErrorBuffer() + cstats := C.ngt_get_graph_statistics(n.index, C.char(mode), C.size_t(n.ces), ne.err) + if !cstats.valid { + return n.newGoError(ne) + } + n.PutErrorBuffer(ne) + defer C.ngt_free_graph_statistics(&cstats) + stats = fromCGraphStatistics(&cstats) + if stats == nil { + return errors.ErrNGTIndexStatisticsNotReady + } + return nil + })() + if err != nil { + return nil, err + } + return stats, nil + }) + if err != nil { + if shared && cnt > 0 && !errors.Is(err, errors.ErrNGTIndexStatisticsNotReady) { + cnt-- + return n.getGraphStatistics(ctx, m, cnt) + } + return nil, err } - n.PutErrorBuffer(ne) - defer C.ngt_free_graph_statistics(&cstats) - return fromCGraphStatistics(&cstats), nil + return stats, nil } func (n *ngt) GetProperty() (prop *Property, err error) { diff --git a/internal/core/algorithm/ngt/ngt_test.go b/internal/core/algorithm/ngt/ngt_test.go index bf0604ffa1..1939330dda 100644 --- a/internal/core/algorithm/ngt/ngt_test.go +++ b/internal/core/algorithm/ngt/ngt_test.go @@ -33,6 +33,7 @@ import ( "github.com/vdaas/vald/internal/log/logger" "github.com/vdaas/vald/internal/strings" "github.com/vdaas/vald/internal/sync" + "github.com/vdaas/vald/internal/sync/singleflight" "github.com/vdaas/vald/internal/test/comparator" "github.com/vdaas/vald/internal/test/goleak" ) @@ -43,8 +44,9 @@ var ( // !!! 
These fields will not be verified in the entire test // Do not validate C dependencies comparator.IgnoreFields(ngt{}, - "dimension", "prop", "epool", "index", "ospace", "eps"), + "dimension", "prop", "epool", "index", "ospace", "eps", "group"), comparator.RWMutexComparer, + comparator.MutexComparer, comparator.ErrorComparer, comparator.AtomicUint64Comparator, } @@ -103,7 +105,7 @@ func TestNew(t *testing.T) { beforeFunc func(args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(w want, got NGT, err error, comparators ...comparator.Option) error { @@ -140,8 +142,10 @@ func TestNew(t *testing.T) { bulkInsertChunkSize: 100, ces: 10, objectType: Float, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -168,8 +172,10 @@ func TestNew(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Float, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -195,8 +201,10 @@ func TestNew(t *testing.T) { poolSize: 100, bulkInsertChunkSize: 100, objectType: Uint8, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -234,7 +242,7 @@ func TestNew(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := New(test.args.opts...) 
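For reference, the singleflight-based GetGraphStatistics change in internal/core/algorithm/ngt/ngt.go above deduplicates concurrent statistics requests and lets a caller that only shared another caller's failed result retry with its own call, unless the failure reports that the statistics are genuinely not ready. Below is a compact, hypothetical sketch of that pattern: fetchStats, errNotReady, stats, and index are invented stand-ins for the CGO call, the errors.ErrNGTIndexStatisticsNotReady sentinel, *GraphStatistics, and the ngt struct, while the singleflight API is used exactly as in the hunk above.

// Sketch of the deduplicate-and-retry pattern (assumptions noted above).
package example

import (
	"context"
	"errors"

	"github.com/vdaas/vald/internal/sync/singleflight"
)

var errNotReady = errors.New("graph statistics not ready")

type stats struct{ numberOfNodes uint64 }

// fetchStats stands in for the mutex-guarded C.ngt_get_graph_statistics call.
func fetchStats(context.Context) (*stats, error) { return &stats{numberOfNodes: 42}, nil }

type index struct {
	group singleflight.Group[*stats]
}

func newIndex() *index {
	return &index{group: singleflight.New[*stats]()}
}

// getStats collapses concurrent callers into one underlying call and, when a
// caller merely shared another caller's failure, retries with a fresh call
// unless the error says the statistics are genuinely not ready yet.
func (i *index) getStats(ctx context.Context, retries int) (*stats, error) {
	s, shared, err := i.group.Do(ctx, "GetGraphStatistics", func(ctx context.Context) (*stats, error) {
		return fetchStats(ctx)
	})
	if err != nil {
		if shared && retries > 0 && !errors.Is(err, errNotReady) {
			return i.getStats(ctx, retries-1)
		}
		return nil, err
	}
	return s, nil
}

A caller would then do idx := newIndex() followed by st, err := idx.getStats(ctx, 10), mirroring the retry budget used by getGraphStatistics above.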
@@ -326,8 +334,10 @@ func TestLoad(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Uint8, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -394,8 +404,10 @@ func TestLoad(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Uint8, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -462,8 +474,10 @@ func TestLoad(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Float, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -530,8 +544,10 @@ func TestLoad(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Float, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -692,7 +708,7 @@ func Test_gen(t *testing.T) { beforeFunc func(*testing.T, args) afterFunc func(*testing.T, NGT) error } - defaultComprators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { + defaultComparators := append(ngtComparator, comparator.CompareField("idxPath", comparator.Comparer(func(s1, s2 string) bool { return s1 == s2 }))) defaultCheckFunc := func(_ context.Context, w want, got NGT, err error, comparators ...comparator.Option) error { @@ -728,8 +744,10 @@ func Test_gen(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Float, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -777,8 +795,10 @@ func Test_gen(t *testing.T) { poolSize: DefaultPoolSize, bulkInsertChunkSize: 100, objectType: Uint8, - mu: &sync.RWMutex{}, - cmu: &sync.RWMutex{}, + mu: new(sync.RWMutex), + cmu: new(sync.RWMutex), + smu: new(sync.Mutex), + group: singleflight.New[*GraphStatistics](), epl: DefaultErrorBufferLimit, }, }, @@ -839,7 +859,7 @@ func Test_gen(t *testing.T) { } comparators := test.comparators if test.comparators == nil || len(test.comparators) == 0 { - comparators = defaultComprators + comparators = defaultComparators } got, err := gen(test.args.isLoad, test.args.opts...) @@ -1049,7 +1069,7 @@ func Test_ngt_loadOptions(t *testing.T) { }, }, { - name: "load option failed with Ignoreable error", + name: "load option failed with Ignorable error", args: args{ opts: []Option{ func(n *ngt) error { @@ -1107,7 +1127,7 @@ func Test_ngt_loadOptions(t *testing.T) { func Test_ngt_create(t *testing.T) { // This test is skipped because it requires ngt.prop to be set probably. // We cannot initialize ngt.prop since it is C dependencies. - // This function is called by New(), and the ngt.prop is destoried in New(), so we cannot test this function individually. + // This function is called by New(), and the ngt.prop is destroyed in New(), so we cannot test this function individually. 
t.SkipNow() } @@ -1123,6 +1143,7 @@ func Test_ngt_open(t *testing.T) { poolSize uint32 mu *sync.RWMutex cmu *sync.RWMutex + smu *sync.Mutex } type want struct { err error @@ -1148,6 +1169,7 @@ func Test_ngt_open(t *testing.T) { epsilon: fields.epsilon, poolSize: fields.poolSize, mu: fields.mu, + smu: fields.smu, } if err := n.setup(); err != nil { t.Error(err) @@ -1171,6 +1193,7 @@ func Test_ngt_open(t *testing.T) { objectType: Float, mu: &sync.RWMutex{}, cmu: &sync.RWMutex{}, + smu: &sync.Mutex{}, }, beforeFunc: func(t *testing.T, fields fields) { t.Helper() @@ -1206,6 +1229,7 @@ func Test_ngt_open(t *testing.T) { objectType: Float, mu: &sync.RWMutex{}, cmu: &sync.RWMutex{}, + smu: &sync.Mutex{}, }, want: want{ err: errors.ErrIndexFileNotFound, @@ -1220,6 +1244,7 @@ func Test_ngt_open(t *testing.T) { objectType: Float, mu: &sync.RWMutex{}, cmu: &sync.RWMutex{}, + smu: &sync.Mutex{}, }, beforeFunc: func(t *testing.T, fields fields) { t.Helper() @@ -1476,7 +1501,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (uint8)", + name: "return vector id after the nearby vector inserted (uint8)", args: args{ ctx: context.Background(), vec: []float32{1, 2, 3, 4, 5, 6, 7, 8, 9}, @@ -1653,7 +1678,7 @@ func Test_ngt_Search(t *testing.T) { }, }, { - name: "resturn vector id after the nearby vector inserted (float)", + name: "return vector id after the nearby vector inserted (float)", args: args{ ctx: context.Background(), vec: []float32{0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.91}, @@ -2281,7 +2306,8 @@ func Test_ngt_InsertCommit(t *testing.T) { { name: "return object id when object type is uint8", args: args{ - vec: []float32{0, 1, 2, 3, 4, 5, 6, 7, 8}, + vec: []float32{0, 1, 2, 3, 4, 5, 6, 7, 8}, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2296,7 +2322,8 @@ func Test_ngt_InsertCommit(t *testing.T) { { name: "return object id when object type is uint8 and all vector elem are 0", args: args{ - vec: []float32{0, 0, 0, 0, 0, 0, 0, 0, 0}, + vec: []float32{0, 0, 0, 0, 0, 0, 0, 0, 0}, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2315,6 +2342,7 @@ func Test_ngt_InsertCommit(t *testing.T) { math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, math.MinInt8, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2333,6 +2361,7 @@ func Test_ngt_InsertCommit(t *testing.T) { math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, math.MaxUint8, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2347,7 +2376,8 @@ func Test_ngt_InsertCommit(t *testing.T) { { name: "return object id when object type is float", args: args{ - vec: []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, + vec: []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2362,7 +2392,8 @@ func Test_ngt_InsertCommit(t *testing.T) { { name: "return object id when object type is float and all vector elem are 0", args: args{ - vec: []float32{0, 0, 0, 0, 0, 0, 0, 0, 0}, + vec: []float32{0, 0, 0, 0, 0, 0, 0, 0, 0}, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2381,6 +2412,7 @@ func Test_ngt_InsertCommit(t *testing.T) { math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, 
math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, math.SmallestNonzeroFloat32, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2399,6 +2431,7 @@ func Test_ngt_InsertCommit(t *testing.T) { math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, math.MaxFloat32, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2413,7 +2446,8 @@ func Test_ngt_InsertCommit(t *testing.T) { { name: "return error if dimension is not the same as insert vector", args: args{ - vec: []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, + vec: []float32{0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2865,6 +2899,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { vecs: [][]float32{ {0, 1, 2, 3, 4, 5, 6, 7, 8}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2888,6 +2923,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {3, 4, 5, 6, 7, 8, 9, 10, 11}, {4, 5, 6, 7, 8, 9, 10, 11, 12}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2908,6 +2944,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {0, 1, 2, 3, 4, 5, 6, 7, 8}, {0, 1, 2, 3, 4, 5, 6, 7, 8}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2930,6 +2967,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {0, 1, 2, 3, 4, 5, 6, 7}, {0, 1, 2, 3, 4, 5, 6, 7, 8, 10}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2953,6 +2991,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { vecs: [][]float32{ {0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2976,6 +3015,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11}, {0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.10, 0.11, 0.12}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -2996,6 +3036,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, {0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -3018,6 +3059,7 @@ func Test_ngt_BulkInsertCommit(t *testing.T) { {0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7}, {0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.10}, }, + poolSize: uint32(1), }, fields: fields{ idxPath: idxTempDir(t), @@ -3147,6 +3189,7 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { bulkInsertChunkSize: 5, dimension: 9, objectType: Float, + poolSize: uint32(1), }, }, { @@ -3187,6 +3230,7 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { bulkInsertChunkSize: 100, dimension: 9, objectType: Float, + poolSize: uint32(1), }, createFunc: func(t *testing.T, f fields) (NGT, error) { t.Helper() @@ -3246,6 +3290,7 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { bulkInsertChunkSize: 5, dimension: 9, objectType: Float, + poolSize: uint32(1), }, createFunc: func(t *testing.T, f fields) (NGT, error) { t.Helper() @@ -3291,6 +3336,7 @@ func Test_ngt_CreateAndSaveIndex(t *testing.T) { bulkInsertChunkSize: 5, dimension: 9, objectType: Float, + poolSize: uint32(1), }, }, } @@ -3406,6 +3452,7 @@ func Test_ngt_CreateIndex(t *testing.T) { bulkInsertChunkSize: 5, dimension: 9, objectType: Float, + poolSize: uint32(1), }, }, { diff --git a/internal/db/rdb/mysql/dbr/dbr.go b/internal/db/rdb/mysql/dbr/dbr.go index db63f4ef6c..355f54138e 100644 --- a/internal/db/rdb/mysql/dbr/dbr.go +++ 
b/internal/db/rdb/mysql/dbr/dbr.go @@ -18,7 +18,7 @@ package dbr import dbr "github.com/gocraft/dbr/v2" -// DBR repreesnts the interface to create connection to MySQL. +// DBR represents the interface to create a connection to MySQL. type DBR interface { Open(driver, dsn string, log EventReceiver) (Connection, error) Eq(col string, val any) Builder @@ -58,3 +58,11 @@ func (*db) Open(driver, dsn string, log EventReceiver) (Connection, error) { func (*db) Eq(col string, val any) Builder { return dbr.Eq(col, val) } + +func prepareSelect[T any](a ...T) (b []any) { + b = make([]any, len(a)) + for i := range a { + b[i] = a[i] + } + return b +} diff --git a/internal/db/rdb/mysql/dbr/insert.go b/internal/db/rdb/mysql/dbr/insert.go index 79be43ec0b..25fb4e8fe6 100644 --- a/internal/db/rdb/mysql/dbr/insert.go +++ b/internal/db/rdb/mysql/dbr/insert.go @@ -34,13 +34,13 @@ type insertStmt struct { *dbr.InsertStmt } -// Columns set colums to the insertStmt. +// Columns sets columns on the insertStmt. func (stmt *insertStmt) Columns(column ...string) InsertStmt { stmt.InsertStmt = stmt.InsertStmt.Columns(column...) return stmt } -// ExecContext execure inserting to the database. +// ExecContext executes the insert against the database. func (stmt *insertStmt) ExecContext(ctx context.Context) (sql.Result, error) { return stmt.InsertStmt.ExecContext(ctx) } diff --git a/internal/db/rdb/mysql/dbr/session.go b/internal/db/rdb/mysql/dbr/session.go index 3fe1a7640e..283006da3e 100644 --- a/internal/db/rdb/mysql/dbr/session.go +++ b/internal/db/rdb/mysql/dbr/session.go @@ -39,10 +39,10 @@ func NewSession(conn Connection, event EventReceiver) Session { return conn.NewSession(event) } -// SeleSelect creates and returns the SelectStmt. +// Select creates and returns the SelectStmt. func (sess *session) Select(column ...string) SelectStmt { return &selectStmt{ - sess.Session.Select(column...), + sess.Session.Select(prepareSelect(column...)...), } } @@ -56,7 +56,7 @@ func (sess *session) Begin() (Tx, error) { // Close closes the database and prevents new queries from starting. // Close then waits for all queries that have started processing on the server to finish. -// Close returns the errro if something goes worng during close. +// Close returns the error if something goes wrong during close. func (sess *session) Close() error { return sess.Session.Close() } diff --git a/internal/db/rdb/mysql/dbr/tx.go b/internal/db/rdb/mysql/dbr/tx.go index e963ac778d..67216c23e3 100644 --- a/internal/db/rdb/mysql/dbr/tx.go +++ b/internal/db/rdb/mysql/dbr/tx.go @@ -43,7 +43,7 @@ func (t *tx) Rollback() error { return t.Tx.Rollback() } -// RollbackUnlessCommitted rollsback the transaction unless it has already been committed or rolled back. +// RollbackUnlessCommitted rolls back the transaction unless it has already been committed or rolled back. func (t *tx) RollbackUnlessCommitted() { t.Tx.RollbackUnlessCommitted() } @@ -65,7 +65,7 @@ func (t *tx) InsertInto(table string) InsertStmt { // Select creates a SelectStmt.
func (t *tx) Select(column ...string) SelectStmt { return &selectStmt{ - t.Tx.Select(column...), + t.Tx.Select(prepareSelect(column...)...), } } diff --git a/internal/db/rdb/mysql/mysql_test.go b/internal/db/rdb/mysql/mysql_test.go index 25c58df364..5cfb49a992 100644 --- a/internal/db/rdb/mysql/mysql_test.go +++ b/internal/db/rdb/mysql/mysql_test.go @@ -1818,7 +1818,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -1916,7 +1916,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2013,7 +2013,7 @@ func Test_mySQLClient_SetVector(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2512,7 +2512,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2612,7 +2612,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -2711,7 +2711,7 @@ func Test_mySQLClient_SetVectors(t *testing.T) { } tx.InsertIntoFunc = func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3022,7 +3022,7 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { func() test { err := errors.ErrMySQLTransactionNotCreated return test{ - name: "return error when transacton is nil", + name: "return error when transaction is nil", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3217,9 +3217,9 @@ func Test_mySQLClient_DeleteVector(t *testing.T) { } }(), func() test { - err := errors.New("podIPTableNmae error") + err := errors.New("podIPTableName error") return test{ - name: "return error when DeleteFromFunc(podIPTableNmae) returns error", + name: "return error when DeleteFromFunc(podIPTableName) returns error", args: args{ ctx: context.Background(), uuid: "vald-01", @@ -3813,7 +3813,7 @@ func Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { @@ -3885,7 +3885,7 @@ func 
Test_mySQLClient_SetIPs(t *testing.T) { RollbackUnlessCommittedFunc: func() {}, InsertIntoFunc: func(table string) dbr.InsertStmt { s := new(dbr.MockInsert) - s.ColumnsFunc = func(colum ...string) dbr.InsertStmt { + s.ColumnsFunc = func(columns ...string) dbr.InsertStmt { return s } s.ExecContextFunc = func(ctx context.Context) (sql.Result, error) { diff --git a/internal/db/rdb/mysql/option.go b/internal/db/rdb/mysql/option.go index f43c720f5a..b93d93c8e8 100644 --- a/internal/db/rdb/mysql/option.go +++ b/internal/db/rdb/mysql/option.go @@ -182,7 +182,7 @@ func WithConnectionLifeTimeLimit(dur string) Option { } // WithMaxIdleConns returns the option to set the maxIdleConns. -// If conns is negative numner, no idle connections are retained. +// If conns is negative number, no idle connections are retained. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L879 func WithMaxIdleConns(conns int) Option { return func(m *mySQLClient) error { @@ -194,7 +194,7 @@ func WithMaxIdleConns(conns int) Option { } // WithMaxOpenConns returns the option to set the maxOpenConns. -// If conns is negative numner, no limit on the number of open connections. +// If conns is negative number, no limit on the number of open connections. // ref: https://golang.org/src/database/sql/sql.go?s=24983:25019#L923 func WithMaxOpenConns(conns int) Option { return func(m *mySQLClient) error { diff --git a/internal/db/storage/blob/cloudstorage/option.go b/internal/db/storage/blob/cloudstorage/option.go index a0df6a793f..40e143be88 100644 --- a/internal/db/storage/blob/cloudstorage/option.go +++ b/internal/db/storage/blob/cloudstorage/option.go @@ -39,7 +39,7 @@ func WithURL(str string) Option { } } -// WithURLOpener returns Option that sets c.urlOpner. +// WithURLOpener returns Option that sets c.urlOpener. func WithURLOpener(uo *gcsblob.URLOpener) Option { return func(c *client) error { if uo != nil { diff --git a/internal/db/storage/blob/s3/reader/option.go b/internal/db/storage/blob/s3/reader/option.go index c75c41613b..a3d5aab2d5 100644 --- a/internal/db/storage/blob/s3/reader/option.go +++ b/internal/db/storage/blob/s3/reader/option.go @@ -60,7 +60,7 @@ func WithBucket(bucket string) Option { } } -// WithMaxChunkSize retunrs the option to set the maxChunkSize. +// WithMaxChunkSize returns the option to set the maxChunkSize. 
func WithMaxChunkSize(size int64) Option { return func(r *reader) { r.maxChunkSize = size diff --git a/internal/db/storage/blob/s3/s3_test.go b/internal/db/storage/blob/s3/s3_test.go index 5902d5e3dd..2f324dd27d 100644 --- a/internal/db/storage/blob/s3/s3_test.go +++ b/internal/db/storage/blob/s3/s3_test.go @@ -389,7 +389,7 @@ func Test_client_Close(t *testing.T) { } tests := []test{ { - name: "retursn nil", + name: "returns nil", want: want{ err: nil, }, diff --git a/internal/db/storage/blob/s3/session/session_test.go b/internal/db/storage/blob/s3/session/session_test.go index 55d8dc7498..1973e624f8 100644 --- a/internal/db/storage/blob/s3/session/session_test.go +++ b/internal/db/storage/blob/s3/session/session_test.go @@ -503,7 +503,7 @@ func Test_sess_Session(t *testing.T) { }, }, { - name: "set EnableParamValdiation success", + name: "set EnableParamValidation success", fields: fields{ enableParamValidation: true, }, @@ -532,7 +532,7 @@ func Test_sess_Session(t *testing.T) { }, }, { - name: "set Enable100Conitnue success", + name: "set Enable100Continue success", fields: fields{ enable100Continue: true, }, diff --git a/internal/errors/agent.go b/internal/errors/agent.go index 80d1a5f35f..a583423e15 100644 --- a/internal/errors/agent.go +++ b/internal/errors/agent.go @@ -115,4 +115,50 @@ var ( // ErrWriteOperationToReadReplica represents an error that when a write operation is made to read replica. ErrWriteOperationToReadReplica = New("write operation to read replica is not possible") + + // ErrInvalidTimestamp represents a function to generate an error that the timestamp is invalid. + ErrInvalidTimestamp = func(ts int64) error { + return Errorf("invalid timestamp detected: %d", ts) + } + + // ErrFlushingIsInProgress represents an error that a flush is in progress but a request has been received. + ErrFlushingIsInProgress = New("flush is in progress") + + // ErrUUIDAlreadyExists represents a function to generate an error that the uuid already exists. + ErrUUIDAlreadyExists = func(uuid string) error { + return Errorf("uuid %s index already exists", uuid) + } + + // ErrUUIDNotFound represents a function to generate an error that the uuid is not found. + ErrUUIDNotFound = func(id uint32) error { + if id == 0 { + return New("object uuid not found") + } + return Errorf("object uuid %d's metadata not found", id) + } + + // ErrObjectIDNotFound represents a function to generate an error that the object id is not found. + ErrObjectIDNotFound = func(uuid string) error { + return Errorf("uuid %s's object id not found", uuid) + } + + // ErrRemoveRequestedBeforeIndexing represents a function to generate an error that the object is not indexed and therefore cannot be removed. + ErrRemoveRequestedBeforeIndexing = func(oid uint) error { + return Errorf("object id %d is not indexed we cannot remove it", oid) + } + + ErrSearchResultEmptyButNoDataStored = New("empty search result from cgo but no index data stored in agent, this error can be ignored.") + + // ErrZeroTimestamp represents an error that the timestamp is zero.
+ ErrZeroTimestamp = New("zero timestamp for index detected") + + // ErrNewerTimestampObjectAlreadyExists represents a function to generate an error that the object is already newer than request + ErrNewerTimestampObjectAlreadyExists = func(uuid string, ts int64) error { + return Errorf("uuid %s's object is already newer than requested timestamp %d", uuid, ts) + } + + // ErrNothingToBeDoneForUpdate represents a function to generate an error that there is no object to update + ErrNothingToBeDoneForUpdate = func(uuid string) error { + return Errorf("nothing to be done for update uuid %s's object", uuid) + } ) diff --git a/internal/errors/agent_test.go b/internal/errors/agent_test.go index 39068920d2..a71607ce90 100644 --- a/internal/errors/agent_test.go +++ b/internal/errors/agent_test.go @@ -17,7 +17,10 @@ // Package errors package errors -import "testing" +import ( + "math" + "testing" +) func TestErrObjectNotFound(t *testing.T) { type args struct { @@ -106,4 +109,1404 @@ func TestErrObjectNotFound(t *testing.T) { } } +func TestErrCreateProperty(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrCreateProperty error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to create property: ngt error"), + }, + }, + { + name: "return an ErrCreateProperty error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to create property"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrCreateProperty(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIndexNotFound(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrIndexNotFound error", + want: want{ + want: New("index not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIndexNotFound + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIndexLoadTimeout(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrIndexLoadTimeout error", + want: want{ + want: New("index load timeout"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIndexLoadTimeout + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrInvalidDimensionSize(t *testing.T) { + type args struct { + current int + limit int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 5", + args: args{ + current: 10, + limit: 5, + }, + want: want{ + want: New("dimension size 10 is invalid, the supporting dimension size must be between 2 ~ 5"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 5", + args: args{ + current: 0, + limit: 5, + }, + want: want{ + want: New("dimension size 0 is invalid, the supporting dimension size must be between 2 ~ 5"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 0", + args: args{ + current: 10, + limit: 0, + }, + want: want{ + want: New("dimension size 10 is invalid, the supporting dimension size must be bigger than 2"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 0", + args: args{ + current: 0, + limit: 0, + }, + want: want{ + want: New("dimension size 0 is invalid, the supporting dimension size must be bigger than 2"), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", + args: args{ + current: int(math.MinInt64), + limit: int(math.MinInt64), + }, + want: want{ + want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MinInt64), int(math.MinInt64)), + }, + }, + { + name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", + args: args{ + current: int(math.MaxInt64), + limit: int(math.MaxInt64), + }, + want: want{ + want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MaxInt64), int(math.MaxInt64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrInvalidDimensionSize(test.args.current, test.args.limit) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrInvalidUUID(t *testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + 
afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrInvalidUUID error when uuid is empty string", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid \"\" is invalid"), + }, + }, + { + name: "return an ErrInvalidUUID error when uuid is foo", + args: args{ + uuid: "foo", + }, + want: want{ + want: New("uuid \"foo\" is invalid"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrInvalidUUID(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrDimensionLimitExceed(t *testing.T) { + type args struct { + current int + limit int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrDimensionLimitExceed error when current is 10 and limit is 5", + args: args{ + current: 10, + limit: 5, + }, + want: want{ + want: New("supported dimension limit exceed:\trequired = 10,\tlimit = 5"), + }, + }, + + { + name: "return an ErrDimensionLimitExceed error when current is 0 and limit is 0", + args: args{ + current: 0, + limit: 0, + }, + want: want{ + want: New("supported dimension limit exceed:\trequired = 0,\tlimit = 0"), + }, + }, + { + name: "return an ErrDimensionLimitExceed error when current and limit are the minimum value of int", + args: args{ + current: int(math.MinInt64), + limit: int(math.MinInt64), + }, + want: want{ + want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MinInt64), int(math.MinInt64)), + }, + }, + { + name: "return an ErrDimensionLimitExceed error when current and limit are the maximum value of int", + args: args{ + current: int(math.MaxInt64), + limit: int(math.MaxInt64), + }, + want: want{ + want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MaxInt64), int(math.MaxInt64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrDimensionLimitExceed(test.args.current, test.args.limit) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrIncompatibleDimensionSize(t *testing.T) { + type args struct { + req int + dim int + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ 
+ { + name: "return an ErrIncompatibleDimensionSize error when req is 640 and dim is 720", + args: args{ + req: 640, + dim: 720, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 720"), + }, + }, + { + name: "return an ErrIncompatibleDimensionSize error when req is empty and dim is 720", + args: args{ + dim: 720, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 0,\tconfigured: 720"), + }, + }, + { + name: "return an ErrIncompatibleDimensionSize error when req is 640", + args: args{ + req: 640, + }, + want: want{ + want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 0"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrIncompatibleDimensionSize(test.args.req, test.args.dim) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUnsupportedObjectType(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUnsupportedObjectType error", + want: want{ + want: New("unsupported ObjectType"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUnsupportedObjectType + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUnsupportedDistanceType(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUnsupportedDistanceType error", + want: want{ + want: New("unsupported DistanceType"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUnsupportedDistanceType + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetDistanceType(t *testing.T) { + type args struct { + err error + distance string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + 
tests := []test{ + { + name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is l2", + args: args{ + err: New("ngt error"), + distance: "l2", + }, + want: want{ + want: New("failed to set distance type l2: ngt error"), + }, + }, + { + name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is empty", + args: args{ + err: New("ngt error"), + distance: "", + }, + want: want{ + want: New("failed to set distance type : ngt error"), + }, + }, + { + name: "return an ErrFailedToSetDistanceType error when err is nil and distance is cos", + args: args{ + err: nil, + distance: "cos", + }, + want: want{ + want: New("failed to set distance type cos"), + }, + }, + { + name: "return an ErrFailedToSetDistanceType error when err is nil and distance is empty", + args: args{ + err: nil, + distance: "", + }, + want: want{ + want: New("failed to set distance type "), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetDistanceType(test.args.err, test.args.distance) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetObjectType(t *testing.T) { + type args struct { + err error + t string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is Float", + args: args{ + err: New("ngt error"), + t: "Float", + }, + want: want{ + want: New("failed to set object type Float: ngt error"), + }, + }, + { + name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is empty", + args: args{ + err: New("ngt error"), + t: "", + }, + want: want{ + want: New("failed to set object type : ngt error"), + }, + }, + { + name: "return an ErrFailedToSetObjectType error when err is nil and t is Int", + args: args{ + err: nil, + t: "Int", + }, + want: want{ + want: New("failed to set object type Int"), + }, + }, + { + name: "return an ErrFailedToSetObjectType error when err is nil and t is empty", + args: args{ + err: nil, + t: "", + }, + want: want{ + want: New("failed to set object type "), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetObjectType(test.args.err, test.args.t) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetDimension(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got 
error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetDimension error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set dimension: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetDimension error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set dimension"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetDimension(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetCreationEdgeSize(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetCreationEdgeSize error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set creation edge size: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetCreationEdgeSize error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set creation edge size"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetCreationEdgeSize(test.args.err) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrFailedToSetSearchEdgeSize(t *testing.T) { + type args struct { + err error + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return a wrapped ErrFailedToSetSearchEdgeSize error when err is ngt error", + args: args{ + err: New("ngt error"), + }, + want: want{ + want: New("failed to set search edge size: ngt error"), + }, + }, + { + name: "return an ErrFailedToSetSearchEdgeSize error when err is nil", + args: args{ + err: nil, + }, + want: want{ + want: New("failed to set search edge size"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrFailedToSetSearchEdgeSize(test.args.err) + if err := 
checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUncommittedIndexExists(t *testing.T) { + type args struct { + num uint64 + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUncommittedIndexExists error when num is 100", + args: args{ + num: 100, + }, + want: want{ + want: New("100 indexes are not committed"), + }, + }, + + { + name: "return an ErrUncommittedIndexExists error when num is 0", + args: args{ + num: 0, + }, + want: want{ + want: New("0 indexes are not committed"), + }, + }, + { + name: "return an ErrUncommittedIndexExists error when num is the maximum value of uint64", + args: args{ + num: math.MaxUint64, + }, + want: want{ + want: Errorf("%d indexes are not committed", uint(math.MaxUint64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUncommittedIndexExists(test.args.num) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUncommittedIndexNotFound(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUncommittedIndexNotFound error", + want: want{ + want: New("uncommitted indexes are not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUncommittedIndexNotFound + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrCAPINotImplemented(t *testing.T) { + type want struct { + want error + } + type test struct { + name string + want want + checkFunc func(want, error) error + beforeFunc func() + afterFunc func() + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrCAPINotImplemented error", + want: want{ + want: New("not implemented in C API"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc() + } + if test.afterFunc != nil { + defer test.afterFunc() + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrCAPINotImplemented + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUUIDAlreadyExists(t 
*testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUUIDAlreadyExists error when uuid is 550e8400-e29b-41d4", + args: args{ + uuid: "550e8400-e29b-41d4", + }, + want: want{ + want: New("uuid 550e8400-e29b-41d4 index already exists"), + }, + }, + { + name: "return an ErrUUIDAlreadyExists error when uuid is empty", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid index already exists"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUUIDAlreadyExists(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrUUIDNotFound(t *testing.T) { + type args struct { + id uint32 + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrUUIDNotFound error when id is 1234", + args: args{ + id: 1234, + }, + want: want{ + want: New("object uuid 1234's metadata not found"), + }, + }, + { + name: "return an ErrUUIDNotFound error when id is the maximum value of uint32", + args: args{ + id: math.MaxUint32, + }, + want: want{ + want: Errorf("object uuid %d's metadata not found", math.MaxUint32), + }, + }, + { + name: "return an ErrUUIDNotFound error when id is 0", + args: args{ + id: 0, + }, + want: want{ + want: New("object uuid not found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrUUIDNotFound(test.args.id) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrObjectIDNotFound(t *testing.T) { + type args struct { + uuid string + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrObjectIDNotFound error when uuid is 550e8400-e29b-41d4.", + args: args{ + uuid: "550e8400-e29b-41d4", + }, + want: want{ + want: New("uuid 550e8400-e29b-41d4's object id not found"), + }, + }, + { + name: "return an ErrObjectIDNotFound error when uuid is empty.", + args: args{ + uuid: "", + }, + want: want{ + want: New("uuid 's object id not 
found"), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrObjectIDNotFound(test.args.uuid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + +func TestErrRemoveRequestedBeforeIndexing(t *testing.T) { + type args struct { + oid uint + } + type want struct { + want error + } + type test struct { + name string + args args + want want + checkFunc func(want, error) error + beforeFunc func(args) + afterFunc func(args) + } + defaultCheckFunc := func(w want, got error) error { + if !Is(got, w.want) { + return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) + } + return nil + } + tests := []test{ + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 100", + args: args{ + oid: 100, + }, + want: want{ + want: New("object id 100 is not indexed we cannot remove it"), + }, + }, + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 0", + args: args{ + oid: 0, + }, + want: want{ + want: New("object id 0 is not indexed we cannot remove it"), + }, + }, + { + name: "return an ErrRemoveRequestedBeforeIndexing error when oid is maximum value of uint", + args: args{ + oid: uint(math.MaxUint64), + }, + want: want{ + want: Errorf("object id %d is not indexed we cannot remove it", uint(math.MaxUint64)), + }, + }, + } + + for _, tc := range tests { + test := tc + t.Run(test.name, func(tt *testing.T) { + if test.beforeFunc != nil { + test.beforeFunc(test.args) + } + if test.afterFunc != nil { + defer test.afterFunc(test.args) + } + checkFunc := test.checkFunc + if test.checkFunc == nil { + checkFunc = defaultCheckFunc + } + + got := ErrRemoveRequestedBeforeIndexing(test.args.oid) + if err := checkFunc(test.want, got); err != nil { + tt.Errorf("error = %v", err) + } + }) + } +} + // NOT IMPLEMENTED BELOW diff --git a/internal/errors/corrector.go b/internal/errors/corrector.go index e4f2180529..626f7a3e0a 100644 --- a/internal/errors/corrector.go +++ b/internal/errors/corrector.go @@ -32,12 +32,9 @@ var ErrNoAvailableAgentToRemove = New("no available agent to remove replica") // ErrFailedToCorrectReplicaNum represents an error that failed to correct replica number after correction process. var ErrFailedToCorrectReplicaNum = New("failed to correct replica number after correction process") -// ErrFailedToReceiveVectorFromStream represents an error that failed to receive vector from stream while index correction process. -var ErrFailedToReceiveVectorFromStream = New("failed to receive vector from stream") - // ErrFailedToCheckConsistency represents an error that failed to check consistency process while index correction process. var ErrFailedToCheckConsistency = func(err error) error { - return Wrap(err, "failed to check consistency while index correctioin process") + return Wrap(err, "failed to check consistency while index correction process") } // ErrStreamListObjectStreamFinishedUnexpectedly represents an error that StreamListObject finished not because of io.EOF. 
diff --git a/internal/errors/errors.go b/internal/errors/errors.go index bbe264ca10..851ca4d829 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -156,13 +156,16 @@ var ( ) // Is represents a function to check whether err and the target is the same or not. -func Is(err, target error) bool { +func Is(err, target error) (same bool) { if target == nil || err == nil { return err == target } - isComparable := reflect.TypeOf(target).Comparable() + return is(err, target, reflect.TypeOf(target).Comparable()) +} + +func is(err, target error, targetComparable bool) (same bool) { for { - if isComparable && (err == target || + if targetComparable && (err == target || err.Error() == target.Error() || strings.EqualFold(err.Error(), target.Error())) { return true @@ -177,21 +180,19 @@ func Is(err, target error) bool { case interface{ Unwrap() error }: err = x.Unwrap() if err == nil { - return isComparable && err == target || - err.Error() == target.Error() || - strings.EqualFold(err.Error(), target.Error()) + return false } case interface{ Unwrap() []error }: for _, err = range x.Unwrap() { - if Is(err, target) { + if is(err, target, targetComparable) { return true } } - return isComparable && err == target || + return targetComparable && err == target || err.Error() == target.Error() || strings.EqualFold(err.Error(), target.Error()) default: - return isComparable && err == target || + return targetComparable && err == target || err.Error() == target.Error() || strings.EqualFold(err.Error(), target.Error()) } diff --git a/internal/errors/grpc.go b/internal/errors/grpc.go index 5e232ff0f3..ab500d8c44 100644 --- a/internal/errors/grpc.go +++ b/internal/errors/grpc.go @@ -63,7 +63,7 @@ var ( // ErrGRPCUnexpectedStatusError represents an error that the gRPC status code is undefined. ErrGRPCUnexpectedStatusError = func(code string, err error) error { - return Wrapf(err, "unexcepted error detected: code %s", code) + return Wrapf(err, "unexpected error detected: code %s", code) } // ErrInvalidProtoMessageType represents an error that the gRPC protocol buffers message type is invalid. diff --git a/internal/errors/net.go b/internal/errors/net.go index 8845b74226..c2dc540cbf 100644 --- a/internal/errors/net.go +++ b/internal/errors/net.go @@ -37,8 +37,14 @@ var ( return Errorf("no port available for Host: %s\tbetween %d ~ %d", host, start, end) } - // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not discovererd from DNS. + // ErrLookupIPAddrNotFound represents a function to generate an error that the host's ip address could not be discovered from DNS. ErrLookupIPAddrNotFound = func(host string) error { return Errorf("failed to lookup ip addrs for host: %s", host) } + + ErrInvalidAddress = func(network, addr string) error { + return Errorf("invalid address %s detected for network: %s", addr, network) + } + + ErrEmptyALPNs = New("empty ALPN protocols detected") ) diff --git a/internal/errors/ngt.go b/internal/errors/ngt.go index cfc1b71630..d922ed103e 100644 --- a/internal/errors/ngt.go +++ b/internal/errors/ngt.go @@ -18,35 +18,6 @@ package errors var ( - - // ErrFlushingIsInProgress represents an error that the flushing is in progress, but any request has been received. - ErrFlushingIsInProgress = New("flush is in progress") - - // ErrUUIDAlreadyExists represents a function to generate an error that the uuid already exists.
- ErrUUIDAlreadyExists = func(uuid string) error { - return Errorf("ngt uuid %s index already exists", uuid) - } - - // ErrUUIDNotFound represents a function to generate an error that the uuid is not found. - ErrUUIDNotFound = func(id uint32) error { - if id == 0 { - return New("ngt object uuid not found") - } - return Errorf("ngt object uuid %d's metadata not found", id) - } - - // ErrObjectIDNotFound represents a function to generate an error that the object id is not found. - ErrObjectIDNotFound = func(uuid string) error { - return Errorf("ngt uuid %s's object id not found", uuid) - } - - // ErrRemoveRequestedBeforeIndexing represents a function to generate an error that the object is not indexed so can not remove it. - ErrRemoveRequestedBeforeIndexing = func(oid uint) error { - return Errorf("object id %d is not indexed we cannot remove it", oid) - } - - ErrSearchResultEmptyButNoDataStored = New("empty search result from cgo but no index data stored in ngt, this error can be ignored.") - ErrNGTIndexStatisticsDisabled = New("ngt get statistics is disabled") ErrNGTIndexStatisticsNotReady = New("ngt get statistics is not ready") diff --git a/internal/errors/ngt_test.go b/internal/errors/ngt_test.go index f8c07f949d..af4aa32c26 100644 --- a/internal/errors/ngt_test.go +++ b/internal/errors/ngt_test.go @@ -17,1411 +17,6 @@ // Package errors package errors -import ( - "math" - "testing" -) - -func TestErrCreateProperty(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrCreateProperty error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to create property: ngt error"), - }, - }, - { - name: "return an ErrCreateProperty error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to create property"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrCreateProperty(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIndexNotFound(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIndexNotFound error", - want: want{ - want: New("index not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIndexNotFound - if err := checkFunc(test.want, got); err != nil 
{ - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIndexLoadTimeout(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIndexLoadTimeout error", - want: want{ - want: New("index load timeout"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIndexLoadTimeout - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrInvalidDimensionSize(t *testing.T) { - type args struct { - current int - limit int - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 5", - args: args{ - current: 10, - limit: 5, - }, - want: want{ - want: New("dimension size 10 is invalid, the supporting dimension size must be between 2 ~ 5"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 5", - args: args{ - current: 0, - limit: 5, - }, - want: want{ - want: New("dimension size 0 is invalid, the supporting dimension size must be between 2 ~ 5"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 10 and limit is 0", - args: args{ - current: 10, - limit: 0, - }, - want: want{ - want: New("dimension size 10 is invalid, the supporting dimension size must be bigger than 2"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current is 0 and limit is 0", - args: args{ - current: 0, - limit: 0, - }, - want: want{ - want: New("dimension size 0 is invalid, the supporting dimension size must be bigger than 2"), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", - args: args{ - current: int(math.MinInt64), - limit: int(math.MinInt64), - }, - want: want{ - want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MinInt64), int(math.MinInt64)), - }, - }, - { - name: "return an ErrInvalidDimensionSize error when current and limit are the minimum value of int", - args: args{ - current: int(math.MaxInt64), - limit: int(math.MaxInt64), - }, - want: want{ - want: Errorf("dimension size %d is invalid, the supporting dimension size must be between 2 ~ %d", int(math.MaxInt64), int(math.MaxInt64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrInvalidDimensionSize(test.args.current, 
test.args.limit) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrInvalidUUID(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrInvalidUUID error when uuid is empty string", - args: args{ - uuid: "", - }, - want: want{ - want: New("uuid \"\" is invalid"), - }, - }, - { - name: "return an ErrInvalidUUID error when uuid is foo", - args: args{ - uuid: "foo", - }, - want: want{ - want: New("uuid \"foo\" is invalid"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrInvalidUUID(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrDimensionLimitExceed(t *testing.T) { - type args struct { - current int - limit int - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrDimensionLimitExceed error when current is 10 and limit is 5", - args: args{ - current: 10, - limit: 5, - }, - want: want{ - want: New("supported dimension limit exceed:\trequired = 10,\tlimit = 5"), - }, - }, - - { - name: "return an ErrDimensionLimitExceed error when current is 0 and limit is 0", - args: args{ - current: 0, - limit: 0, - }, - want: want{ - want: New("supported dimension limit exceed:\trequired = 0,\tlimit = 0"), - }, - }, - { - name: "return an ErrDimensionLimitExceed error when current and limit are the minimum value of int", - args: args{ - current: int(math.MinInt64), - limit: int(math.MinInt64), - }, - want: want{ - want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MinInt64), int(math.MinInt64)), - }, - }, - { - name: "return an ErrDimensionLimitExceed error when current and limit are the maximum value of int", - args: args{ - current: int(math.MaxInt64), - limit: int(math.MaxInt64), - }, - want: want{ - want: Errorf("supported dimension limit exceed:\trequired = %d,\tlimit = %d", int(math.MaxInt64), int(math.MaxInt64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrDimensionLimitExceed(test.args.current, test.args.limit) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrIncompatibleDimensionSize(t *testing.T) { - type args struct { - req int - dim int - } - type want 
struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrIncompatibleDimensionSize error when req is 640 and dim is 720", - args: args{ - req: 640, - dim: 720, - }, - want: want{ - want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 720"), - }, - }, - { - name: "return an ErrIncompatibleDimensionSize error when req is empty and dim is 720", - args: args{ - dim: 720, - }, - want: want{ - want: New("incompatible dimension size detected\trequested: 0,\tconfigured: 720"), - }, - }, - { - name: "return an ErrIncompatibleDimensionSize error when req is 640", - args: args{ - req: 640, - }, - want: want{ - want: New("incompatible dimension size detected\trequested: 640,\tconfigured: 0"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrIncompatibleDimensionSize(test.args.req, test.args.dim) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUnsupportedObjectType(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUnsupportedObjectType error", - want: want{ - want: New("unsupported ObjectType"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUnsupportedObjectType - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUnsupportedDistanceType(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUnsupportedDistanceType error", - want: want{ - want: New("unsupported DistanceType"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUnsupportedDistanceType - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetDistanceType(t *testing.T) { - type args struct { - err error - distance string 
- } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is l2", - args: args{ - err: New("ngt error"), - distance: "l2", - }, - want: want{ - want: New("failed to set distance type l2: ngt error"), - }, - }, - { - name: "return a wrapped ErrFailedToSetDistanceType error when err is ngt error and distance is empty", - args: args{ - err: New("ngt error"), - distance: "", - }, - want: want{ - want: New("failed to set distance type : ngt error"), - }, - }, - { - name: "return an ErrFailedToSetDistanceType error when err is nil and distance is cos", - args: args{ - err: nil, - distance: "cos", - }, - want: want{ - want: New("failed to set distance type cos"), - }, - }, - { - name: "return an ErrFailedToSetDistanceType error when err is nil and distance is empty", - args: args{ - err: nil, - distance: "", - }, - want: want{ - want: New("failed to set distance type "), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetDistanceType(test.args.err, test.args.distance) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetObjectType(t *testing.T) { - type args struct { - err error - t string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is Float", - args: args{ - err: New("ngt error"), - t: "Float", - }, - want: want{ - want: New("failed to set object type Float: ngt error"), - }, - }, - { - name: "return a wrapped ErrFailedToSetObjectType error when err is ngt error and t is empty", - args: args{ - err: New("ngt error"), - t: "", - }, - want: want{ - want: New("failed to set object type : ngt error"), - }, - }, - { - name: "return an ErrFailedToSetObjectType error when err is nil and t is Int", - args: args{ - err: nil, - t: "Int", - }, - want: want{ - want: New("failed to set object type Int"), - }, - }, - { - name: "return an ErrFailedToSetObjectType error when err is nil and t is empty", - args: args{ - err: nil, - t: "", - }, - want: want{ - want: New("failed to set object type "), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetObjectType(test.args.err, test.args.t) - if err := checkFunc(test.want, got); err != nil { - 
tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetDimension(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetDimension error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set dimension: ngt error"), - }, - }, - { - name: "return an ErrFailedToSetDimension error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set dimension"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetDimension(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetCreationEdgeSize(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetCreationEdgeSize error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set creation edge size: ngt error"), - }, - }, - { - name: "return an ErrFailedToSetCreationEdgeSize error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set creation edge size"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetCreationEdgeSize(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrFailedToSetSearchEdgeSize(t *testing.T) { - type args struct { - err error - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return a wrapped ErrFailedToSetSearchEdgeSize error when err is ngt error", - args: args{ - err: New("ngt error"), - }, - want: want{ - want: New("failed to set search edge size: ngt error"), - }, - }, - { - name: "return an ErrFailedToSetSearchEdgeSize error when err is nil", - args: args{ - err: nil, - }, - want: want{ - want: New("failed to set search edge size"), - }, - }, - } - - for _, tc := 
range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrFailedToSetSearchEdgeSize(test.args.err) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUncommittedIndexExists(t *testing.T) { - type args struct { - num uint64 - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUncommittedIndexExists error when num is 100", - args: args{ - num: 100, - }, - want: want{ - want: New("100 indexes are not committed"), - }, - }, - - { - name: "return an ErrUncommittedIndexExists error when num is 0", - args: args{ - num: 0, - }, - want: want{ - want: New("0 indexes are not committed"), - }, - }, - { - name: "return an ErrUncommittedIndexExists error when num is the maximum value of uint64", - args: args{ - num: math.MaxUint64, - }, - want: want{ - want: Errorf("%d indexes are not committed", uint(math.MaxUint64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUncommittedIndexExists(test.args.num) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUncommittedIndexNotFound(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUncommittedIndexNotFound error", - want: want{ - want: New("uncommitted indexes are not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUncommittedIndexNotFound - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrCAPINotImplemented(t *testing.T) { - type want struct { - want error - } - type test struct { - name string - want want - checkFunc func(want, error) error - beforeFunc func() - afterFunc func() - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrCAPINotImplemented error", - want: want{ - want: New("not implemented in C API"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != 
nil { - test.beforeFunc() - } - if test.afterFunc != nil { - defer test.afterFunc() - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrCAPINotImplemented - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUUIDAlreadyExists(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUUIDAlreadyExists error when uuid is 550e8400-e29b-41d4", - args: args{ - uuid: "550e8400-e29b-41d4", - }, - want: want{ - want: New("ngt uuid 550e8400-e29b-41d4 index already exists"), - }, - }, - { - name: "return an ErrUUIDAlreadyExists error when uuid is empty", - args: args{ - uuid: "", - }, - want: want{ - want: New("ngt uuid index already exists"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUUIDAlreadyExists(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrUUIDNotFound(t *testing.T) { - type args struct { - id uint32 - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrUUIDNotFound error when id is 1234", - args: args{ - id: 1234, - }, - want: want{ - want: New("ngt object uuid 1234's metadata not found"), - }, - }, - { - name: "return an ErrUUIDNotFound error when id is the maximum value of uint32", - args: args{ - id: math.MaxUint32, - }, - want: want{ - want: Errorf("ngt object uuid %d's metadata not found", math.MaxUint32), - }, - }, - { - name: "return an ErrUUIDNotFound error when id is 0", - args: args{ - id: 0, - }, - want: want{ - want: New("ngt object uuid not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrUUIDNotFound(test.args.id) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrObjectIDNotFound(t *testing.T) { - type args struct { - uuid string - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - 
name: "return an ErrObjectIDNotFound error when uuid is 550e8400-e29b-41d4.", - args: args{ - uuid: "550e8400-e29b-41d4", - }, - want: want{ - want: New("ngt uuid 550e8400-e29b-41d4's object id not found"), - }, - }, - { - name: "return an ErrObjectIDNotFound error when uuid is empty.", - args: args{ - uuid: "", - }, - want: want{ - want: New("ngt uuid 's object id not found"), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrObjectIDNotFound(test.args.uuid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - -func TestErrRemoveRequestedBeforeIndexing(t *testing.T) { - type args struct { - oid uint - } - type want struct { - want error - } - type test struct { - name string - args args - want want - checkFunc func(want, error) error - beforeFunc func(args) - afterFunc func(args) - } - defaultCheckFunc := func(w want, got error) error { - if !Is(got, w.want) { - return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) - } - return nil - } - tests := []test{ - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 100", - args: args{ - oid: 100, - }, - want: want{ - want: New("object id 100 is not indexed we cannot remove it"), - }, - }, - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is 0", - args: args{ - oid: 0, - }, - want: want{ - want: New("object id 0 is not indexed we cannot remove it"), - }, - }, - { - name: "return an ErrRemoveRequestedBeforeIndexing error when oid is maximum value of uint", - args: args{ - oid: uint(math.MaxUint64), - }, - want: want{ - want: Errorf("object id %d is not indexed we cannot remove it", uint(math.MaxUint64)), - }, - }, - } - - for _, tc := range tests { - test := tc - t.Run(test.name, func(tt *testing.T) { - if test.beforeFunc != nil { - test.beforeFunc(test.args) - } - if test.afterFunc != nil { - defer test.afterFunc(test.args) - } - checkFunc := test.checkFunc - if test.checkFunc == nil { - checkFunc = defaultCheckFunc - } - - got := ErrRemoveRequestedBeforeIndexing(test.args.oid) - if err := checkFunc(test.want, got); err != nil { - tt.Errorf("error = %v", err) - } - }) - } -} - // NOT IMPLEMENTED BELOW // // func TestNewNGTError(t *testing.T) { diff --git a/internal/errors/option_test.go b/internal/errors/option_test.go index c65e1fb74f..2c6357eadb 100644 --- a/internal/errors/option_test.go +++ b/internal/errors/option_test.go @@ -50,7 +50,7 @@ func TestNewErrInvalidOption(t *testing.T) { name := "WithPort" val := 9000 return test{ - name: "return ErrInvalidOpton when name and val have a value and errs is empty.", + name: "return ErrInvalidOption when name and val have a value and errs is empty.", args: args{ name: name, val: val, @@ -70,7 +70,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[0] return test{ - name: "return ErrInvalidOpton when all of parameter has value.", + name: "return ErrInvalidOption when all of parameter has value.", args: args{ name: name, val: val, @@ -93,7 +93,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := errs[1] return test{ - name: "return ErrInvalidOpton when all of parameter has value and errs has nil as value.", + name: "return ErrInvalidOption when all of parameter has value and errs has nil as value.", args: 
args{ name: name, val: val, @@ -115,7 +115,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when name is nil and val and errs have values.", + name: "return ErrInvalidOption when name is nil and val and errs have values.", args: args{ val: val, errs: errs, @@ -136,7 +136,7 @@ func TestNewErrInvalidOption(t *testing.T) { } e := Wrap(errs[1], errs[0].Error()) return test{ - name: "return ErrInvalidOpton when val is nil and name and errs have values.", + name: "return ErrInvalidOption when val is nil and name and errs have values.", args: args{ name: name, errs: errs, diff --git a/internal/errors/redis.go b/internal/errors/redis.go index 0221b6186a..cf28c70b7d 100644 --- a/internal/errors/redis.go +++ b/internal/errors/redis.go @@ -21,7 +21,7 @@ var ( // ErrRedisInvalidKVVKPrefix represents a function to generate an error that kv index and vk prefix are invalid. ErrRedisInvalidKVVKPrefix = func(kv, vk string) error { - return Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", kv, vk) + return Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", kv, vk) } // ErrRedisNotFoundIdentity generates an RedisNotFoundIdentityError error. diff --git a/internal/errors/redis_test.go b/internal/errors/redis_test.go index 643fe8e7fa..58b8e7f1cb 100644 --- a/internal/errors/redis_test.go +++ b/internal/errors/redis_test.go @@ -24,7 +24,7 @@ import ( "github.com/vdaas/vald/internal/test/goleak" ) -func TestErrRedisInvalidKVVKPrefic(t *testing.T) { +func TestErrRedisInvalidKVVKPrefix(t *testing.T) { type fields struct { kv string vk string @@ -56,7 +56,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, str), }, } }(), @@ -67,7 +67,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { kv: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", str, ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", str, ""), }, } }(), @@ -78,7 +78,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { vk: str, }, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", str), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", str), }, } }(), @@ -87,7 +87,7 @@ func TestErrRedisInvalidKVVKPrefic(t *testing.T) { name: "return an ErrRedisInvalidKVVKPrefix error when kv and vk are empty", fields: fields{}, want: want{ - want: Errorf("kv index and vk prefix must be defferent.\t(kv: %s,\tvk: %s)", "", ""), + want: Errorf("kv index and vk prefix must be different.\t(kv: %s,\tvk: %s)", "", ""), }, } }(), @@ -167,7 +167,7 @@ func TestErrRedisNotFoundIdentity(t *testing.T) { } } -func TestErrRdisNotFound(t *testing.T) { +func TestErrRedisNotFound(t *testing.T) { type fields struct { key string } @@ -304,7 +304,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -316,7 +316,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: 
"return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -327,7 +327,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -338,7 +338,7 @@ func TestErrRedisGetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to fetch key ()"), @@ -396,7 +396,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -408,7 +408,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -419,7 +419,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -430,7 +430,7 @@ func TestErrRedisSetOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to set key ()"), @@ -488,7 +488,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { tests := []test{ func() test { return test{ - name: "return a wraped error when key is not empty and err is not nil", + name: "return a wrapped error when key is not empty and err is not nil", fields: fields{ key: key, err: err, @@ -500,7 +500,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is not empty and err is nil", + name: "return a wrapped error when key is not empty and err is nil", fields: fields{ key: key, }, @@ -511,7 +511,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is not nil", + name: "return a wrapped error when key is empty and err is not nil", fields: fields{ err: err, }, @@ -522,7 +522,7 @@ func TestErrRedisDeleteOperationFailed(t *testing.T) { }(), func() test { return test{ - name: "return a wraped error when key is empty and err is nil", + name: "return a wrapped error when key is empty and err is nil", fields: fields{}, want: want{ want: Wrap(nil, "Failed to delete key ()"), diff --git a/internal/errors/tls.go b/internal/errors/tls.go index fd3b99dcb3..0565cc4fd2 100644 --- a/internal/errors/tls.go +++ b/internal/errors/tls.go @@ -20,10 +20,10 @@ package errors var ( // TLS. - // ErrTLSDisabled is error variable, it's replesents config error that tls is disabled by config. + // ErrTLSDisabled is error variable, it's represents config error that tls is disabled by config. ErrTLSDisabled = New("tls feature is disabled") - // ErrTLSCertOrKeyNotFound is error variable, it's replesents tls cert or key not found error. 
+ // ErrTLSCertOrKeyNotFound is error variable, it's represents tls cert or key not found error. ErrTLSCertOrKeyNotFound = New("cert or key file path not found") ErrCertificationFailed = New("certification failed") diff --git a/internal/errors/usearch_test.go b/internal/errors/usearch_test.go new file mode 100644 index 0000000000..2151521ffc --- /dev/null +++ b/internal/errors/usearch_test.go @@ -0,0 +1,189 @@ +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package errors + +// NOT IMPLEMENTED BELOW +// +// func TestNewUsearchError(t *testing.T) { +// type args struct { +// msg string +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !Is(err, w.err) { +// return Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// msg:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// msg:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// err := NewUsearchError(test.args.msg) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestUsearchError_Error(t *testing.T) { +// type fields struct { +// Msg string +// } +// type want struct { +// want string +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, string) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got string) error { +// if !reflect.DeepEqual(got, w.want) { +// return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// Msg:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) 
{ +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// Msg:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// u := UsearchError{ +// Msg: test.fields.Msg, +// } +// +// got := u.Error() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/errors/vald.go b/internal/errors/vald.go index 9d61857733..2bb7f5edcb 100644 --- a/internal/errors/vald.go +++ b/internal/errors/vald.go @@ -25,7 +25,7 @@ var ( // ErrSameVectorAlreadyExists represents an error that vald already has same features vector data. ErrSameVectorAlreadyExists = func(meta string, n, o []float32) error { - return Errorf("vald metadata:\t%s\talready exists reqested: %v, stored: %v", meta, n, o) + return Errorf("vald metadata:\t%s\talready exists requested: %v, stored: %v", meta, n, o) } // ErrMetaDataCannotFetch represents an error that vald metadata cannot fetch. diff --git a/internal/file/file.go b/internal/file/file.go index dfe8434176..6bb085e377 100644 --- a/internal/file/file.go +++ b/internal/file/file.go @@ -36,6 +36,10 @@ import ( "github.com/vdaas/vald/internal/sync/errgroup" ) +const ( + sep = string(os.PathSeparator) +) + // Open opens the file with the given path, flag and permission. // If the folder does not exists, create the folder. // If the file does not exist, create the file. 
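// ---------------------------------------------------------------------------
// NOTE (editorial aside, not part of the patch): the hunk above introduces a
// package-level `sep` constant for string(os.PathSeparator) in
// internal/file/file.go. The sketch below is a minimal, hypothetical
// illustration of why such a constant helps: the separator string is reused
// in several places in that file (for example the replacer, shown in the
// following hunks, that collapses runs of duplicated separators), so naming
// it once keeps those call sites short and consistent. Everything here except
// `sep` itself is invented for illustration and is not part of the diff.
// ---------------------------------------------------------------------------
package main

import (
	"fmt"
	"os"
	"strings"
)

const sep = string(os.PathSeparator)

// collapseSeparators mirrors the idea of the replacer defined later in
// file.go: squash runs of two or three separators down to a single one.
var collapseSeparators = strings.NewReplacer(
	sep+sep+sep, sep,
	sep+sep, sep,
)

func main() {
	// On Unix-like systems this prints "/tmp/vald/data".
	fmt.Println(collapseSeparators.Replace("/tmp///vald//data"))
}
// ---------------------------------------------------------------------------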
@@ -476,8 +480,8 @@ func ListInDir(path string) ([]string, error) { if !exists { return nil, err } - if fi.Mode().IsDir() && !strings.HasSuffix(path, string(os.PathSeparator)) { - path += string(os.PathSeparator) + if fi.Mode().IsDir() && !strings.HasSuffix(path, sep) { + path += sep } path = filepath.Dir(path) files, err := filepath.Glob(Join(path, "*")) @@ -538,7 +542,7 @@ func Join(paths ...string) (path string) { } else { path = replacer.Replace(paths[0]) } - if filepath.IsAbs(path) { + if filepath.IsAbs(path) || !Exists(path) { return filepath.Clean(path) } @@ -548,20 +552,24 @@ func Join(paths ...string) (path string) { log.Warn(err) return filepath.Clean(path) } - return filepath.Clean(joinFilePaths(root, path)) + abs := joinFilePaths(root, path) + if !Exists(abs) { + return filepath.Clean(path) + } + return filepath.Clean(abs) } var replacer = strings.NewReplacer( - string(os.PathSeparator)+string(os.PathSeparator)+string(os.PathSeparator), - string(os.PathSeparator), - string(os.PathSeparator)+string(os.PathSeparator), - string(os.PathSeparator), + sep+sep+sep, + sep, + sep+sep, + sep, ) func joinFilePaths(paths ...string) (path string) { for i, path := range paths { if path != "" { - return replacer.Replace(strings.Join(paths[i:], string(os.PathSeparator))) + return replacer.Replace(strings.Join(paths[i:], sep)) } } return "" diff --git a/internal/info/info.go b/internal/info/info.go index 6366b3b462..6abb9109c7 100644 --- a/internal/info/info.go +++ b/internal/info/info.go @@ -37,9 +37,10 @@ type Info interface { } type info struct { - baseURL string // e.g https://github.com/vdaas/vald/tree/main - detail Detail - prepOnce sync.Once + baseURL string // e.g https://github.com/vdaas/vald/tree/main + detail Detail + prepOnce sync.Once + valdReplacer *strings.Replacer // runtime functions rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) @@ -115,13 +116,14 @@ var ( ) const ( - goSrc = "go/src/" - goSrcLen = len(goSrc) - goMod = "go/pkg/mod/" - goModLen = len(goMod) - cgoTrue = "true" - cgoFalse = "false" - cgoUnknown = "unknown" + goSrc = "go/src/" + goSrcLen = len(goSrc) + goMod = "go/pkg/mod/" + goModLen = len(goMod) + cgoTrue = "true" + cgoFalse = "false" + cgoUnknown = "unknown" + googleGolang = "google.golang.org" ) // Init initializes Detail object only once. @@ -280,7 +282,7 @@ func (d Detail) String() string { return "\n" + strings.Join(strs, "\n") } -// Get returns parased Detail object. +// Get returns parsed Detail object. 
func (i *info) Get() Detail { i.prepare() return i.getDetail() @@ -298,11 +300,35 @@ func (i info) getDetail() Detail { if funcName == "runtime.main" { break } + index := strings.LastIndex(funcName, "/") + if index != -1 { + funcName = funcName[index+1:] + } url := i.baseURL var idx int switch { case strings.HasPrefix(file, i.detail.GoRoot+"/src"): - url = "https://github.com/golang/go/blob/" + i.detail.GoVersion + strings.TrimPrefix(file, i.detail.GoRoot) + "#L" + strconv.Itoa(line) + url = "https://github.com/golang/go/blob/" + i.detail.GoVersion + strings.TrimPrefix(file, i.detail.GoRoot) + case strings.HasPrefix(file, "runtime"): + url = "https://github.com/golang/go/blob/" + i.detail.GoVersion + "/src/" + file + case strings.HasPrefix(file, googleGolang+"/grpc"): + // google.golang.org/grpc@v1.65.0/server.go to https://github.com/grpc/grpc-go/blob/v1.65.0/server.go + url = "https://github.com/grpc/grpc-go/blob/" + _, versionSource, ok := strings.Cut(file, "@") + if ok && versionSource != "" { + url += versionSource + } else { + url = strings.ReplaceAll(file, googleGolang+"/grpc@", url) + } + case strings.HasPrefix(file, googleGolang+"/protobuf"): + // google.golang.org/protobuf@v1.34.0/proto/decode.go to https://github.com/protocolbuffers/protobuf-go/blob/v1.34.0/proto/decode.go + url = "https://github.com/protocolbuffers/protobuf-go/blob/" + _, versionSource, ok := strings.Cut(file, "@") + if ok && versionSource != "" { + url += versionSource + } else { + url = strings.ReplaceAll(file, googleGolang+"/protobuf@", url) + } case func() bool { idx = strings.Index(file, goMod) return idx >= 0 @@ -319,15 +345,16 @@ func (i info) getDetail() Detail { } url += "/" + path } - url += "#L" + strconv.Itoa(line) case func() bool { idx = strings.Index(file, goSrc) return idx >= 0 && strings.Index(file, valdRepo) >= 0 }(): - url = strings.Replace(file[idx+goSrcLen:]+"#L"+strconv.Itoa(line), valdRepo, "https://"+valdRepo+"/blob/"+i.detail.GitCommit, -1) + url = i.valdReplacer.Replace(file[idx+goSrcLen:]) case strings.HasPrefix(file, valdRepo): - url = fmt.Sprintf("%s#L%d", strings.Replace(file, valdRepo, "https://"+valdRepo+"/blob/"+i.detail.GitCommit, -1), line) + url = i.valdReplacer.Replace(file) } + url += "#L" + strconv.Itoa(line) + i.detail.StackTrace = append(i.detail.StackTrace, StackTrace{ FuncName: funcName, File: file, @@ -364,7 +391,7 @@ func (i *info) prepare() { if i.detail.CGOEnabled == "" && CGOEnabled != "" { i.detail.CGOEnabled = CGOEnabled } - switch i.detail.CGOEnabled { + switch CGOEnabled { case "0", cgoFalse: i.detail.CGOEnabled = cgoFalse case "1", cgoTrue: @@ -390,9 +417,16 @@ func (i *info) prepare() { if len(i.detail.GoroutineCount) == 0 { i.detail.GoroutineCount = strconv.Itoa(runtime.NumGoroutine()) } + if i.valdReplacer == nil { + i.valdReplacer = strings.NewReplacer(valdRepo, "https://"+valdRepo+"/blob/"+i.detail.GitCommit) + } }) } func (s StackTrace) String() string { return "URL: " + s.URL + "\tFile: " + s.File + "\tLine: #" + strconv.Itoa(s.Line) + "\tFuncName: " + s.FuncName } + +func (s StackTrace) ShortString() string { + return s.URL + " " + s.FuncName +} diff --git a/internal/k8s/job/job.go b/internal/k8s/job/job.go index 6f4492602f..cab6527502 100644 --- a/internal/k8s/job/job.go +++ b/internal/k8s/job/job.go @@ -16,13 +16,13 @@ package job import ( "context" "reflect" - "strings" - "sync" "time" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/k8s" "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/strings" + 
"github.com/vdaas/vald/internal/sync" batchv1 "k8s.io/api/batch/v1" k8serrors "k8s.io/apimachinery/pkg/api/errors" "sigs.k8s.io/controller-runtime/pkg/builder" diff --git a/internal/log/level/level.go b/internal/log/level/level.go index 6f1a78ac40..ac45ef36d0 100644 --- a/internal/log/level/level.go +++ b/internal/log/level/level.go @@ -56,20 +56,44 @@ func (l Level) String() string { } func Atol(str string) Level { - str = strings.ToUpper(str) - for i := len(str); i > 0; i-- { - switch str[:i] { - case DEBUG.String(), "DEB", "DEBG", "DB", "DBG", "D": - return DEBUG - case INFO.String(), "IFO", "INF", "IF", "IN", "I": - return INFO - case WARN.String(), "WARNING", "WAR", "WRN", "WN", "W": - return WARN - case ERROR.String(), "ERROR", "ERRO", "ER", "ERR", "E": - return ERROR - case FATAL.String(), "FATA", "FAT", "FT", "FL", "F": - return FATAL - } + l, ok := map[string]Level{ + DEBUG.String(): DEBUG, + DEBUG.String() + "S": DEBUG, + "D": DEBUG, + "DB": DEBUG, + "DBG": DEBUG, + "DEB": DEBUG, + "DEBG": DEBUG, + INFO.String(): INFO, + INFO.String() + "S": INFO, + "I": INFO, + "IF": INFO, + "IFO": INFO, + "IN": INFO, + "INF": INFO, + WARN.String(): WARN, + WARN.String() + "S": WARN, + "W": WARN, + "WAR": WARN, + "WARNING": WARN, + "WN": WARN, + "WRN": WARN, + ERROR.String(): ERROR, + ERROR.String() + "S": ERROR, + "E": ERROR, + "ER": ERROR, + "ERR": ERROR, + "ERRO": ERROR, + FATAL.String(): FATAL, + FATAL.String() + "S": FATAL, + "F": FATAL, + "FAT": FATAL, + "FATA": FATAL, + "FL": FATAL, + "FT": FATAL, + }[strings.ToUpper(str)] + if ok { + return l } return Unknown } diff --git a/internal/log/option_test.go b/internal/log/option_test.go index e1557f1a93..c1018f832e 100644 --- a/internal/log/option_test.go +++ b/internal/log/option_test.go @@ -148,7 +148,7 @@ func TestWithLoggerType(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -227,7 +227,7 @@ func TestWithLevel(t *testing.T) { { name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), @@ -312,7 +312,7 @@ func TestWithFormat(t *testing.T) { return test{ name: "set nothing when str is invalid", args: args{ - str: "valdvaldinvalid", + str: "invalid", }, want: want{ obj: new(T), diff --git a/internal/net/dialer.go b/internal/net/dialer.go index 5b91c34343..08ffd74bd4 100644 --- a/internal/net/dialer.go +++ b/internal/net/dialer.go @@ -30,6 +30,7 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/control" + "github.com/vdaas/vald/internal/net/quic" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/sync" @@ -123,7 +124,7 @@ func NewDialer(opts ...DialerOption) (der Dialer, err error) { if d.dnsCache, err = cache.New( cache.WithExpireDuration[*dialerCache](d.dnsCacheExpirationStr), cache.WithExpireCheckDuration[*dialerCache](d.dnsRefreshDurationStr), - cache.WithExpiredHook[*dialerCache](d.cacheExpireHook), + cache.WithExpiredHook(d.cacheExpireHook), ); err != nil { return nil, err } @@ -246,7 +247,22 @@ func (d *dialer) cachedDialer(ctx context.Context, network, addr string) (conn C if err != nil { return nil, err } - port = strconv.FormatUint(uint64(nport), 10) + if nport != 0 { + port = strconv.FormatUint(uint64(nport), 10) + } else { + const ( + defaultTCPPort = "80" + defaultUDPPort = "53" + ) + switch network { + case TCP.String(), 
TCP4.String(), TCP6.String(): + port = defaultTCPPort + case UDP.String(), UDP4.String(), UDP6.String(): + port = defaultUDPPort + default: + log.Warnf("Unknown network type: %s. Port will be empty.", network) + } + } d.addrs.Store(addr, &addrInfo{ host: host, port: port, @@ -295,6 +311,17 @@ func (d *dialer) cachedDialer(ctx context.Context, network, addr string) (conn C return d.dial(ctx, network, addr) } +func isQUICDial(network, addr string) bool { + if !IsUDP(network) { + return false + } + host, port, err := SplitHostPort(addr) + if err != nil || host == "" || port == 0 { + return false + } + return port != 53 +} + func (d *dialer) dial(ctx context.Context, network, addr string) (conn Conn, err error) { ctx, span := trace.StartSpan(ctx, apiName+"/Dialer.dial") defer func() { @@ -302,26 +329,33 @@ func (d *dialer) dial(ctx context.Context, network, addr string) (conn Conn, err span.End() } }() + if NetworkTypeFromString(network) == Unknown { + network = TCP.String() + } + if addr == "" { + return nil, errors.ErrInvalidAddress(network, addr) + } log.Debugf("%s connection dialing to addr %s", network, addr) - err = safety.RecoverWithoutPanicFunc(func() error { - conn, err = d.der.DialContext(ctx, network, addr) + err = safety.RecoverWithoutPanicFunc(func() (err error) { + if isQUICDial(network, addr) { + conn, err = quic.DialContext(ctx, addr, d.tlsConfig) + } else { + if IsUDP(network) { + network = TCP.String() + } + conn, err = d.der.DialContext(ctx, network, addr) + } return err })() if err != nil { - defer func(conn Conn) { - if conn != nil { - if err != nil { - err = errors.Join(conn.Close(), err) - return - } - err = conn.Close() - } - }(conn) + if conn != nil { + err = errors.Join(conn.Close(), err) + } return nil, err } d.tmu.RLock() - if d.tlsConfig != nil { + if !IsUDP(network) && d.tlsConfig != nil { d.tmu.RUnlock() return d.tlsHandshake(ctx, conn, network, addr) } @@ -408,15 +442,12 @@ func (d *dialer) tlsHandshake( })() } if err != nil || conn == nil { - defer func(conn Conn) { - if conn != nil { - if err != nil { - err = errors.Join(conn.Close(), err) - return - } - err = conn.Close() + if conn != nil { + if err != nil { + return nil, errors.Join(conn.Close(), err) } - }(conn) + return nil, conn.Close() + } return nil, err } tconn, ok := conn.(*tls.Conn) @@ -439,11 +470,15 @@ func (d *dialer) tlsHandshake( return tconn, nil } -func (d *dialer) cacheExpireHook(ctx context.Context, addr string) { +func (d *dialer) cacheExpireHook(ctx context.Context, addr string, dc *dialerCache) { if err := safety.RecoverFunc(func() (err error) { _, err = d.lookup(ctx, addr) return })(); err != nil { - log.Errorf("dns cache expiration hook process returned error: %v\tfor addr:\t%s", err, addr) + if dc != nil { + log.Errorf("dns cache expiration hook process returned error: %v\tfor addr:\t%s\tips: %v\tlen: %d", err, addr, dc.ips, dc.Len()) + } else { + log.Errorf("dns cache expiration hook process returned error: %v\tfor addr:\t%s", err, addr) + } } } diff --git a/internal/net/dialer_test.go b/internal/net/dialer_test.go index 209e0517b3..2cf8ffe9ac 100644 --- a/internal/net/dialer_test.go +++ b/internal/net/dialer_test.go @@ -1163,7 +1163,7 @@ func Test_dialer_cachedDialer(t *testing.T) { // check the connection made on the same port _, p, _ := net.SplitHostPort(gotConn.RemoteAddr().String()) if p != strconv.Itoa(int(port)) { - return errors.Errorf("unexcepted port number, except: %d, got: %s", port, p) + return errors.Errorf("unexpected port number, except: %d, got: %s", port, p) } // 
read the output from the server and check if it is equals to the count @@ -1425,7 +1425,7 @@ func Test_dialer_dial(t *testing.T) { return nil }, want: want{ - err: errors.New("missing address"), + err: errors.ErrInvalidAddress(TCP.String(), ""), }, }, { @@ -1452,7 +1452,7 @@ func Test_dialer_dial(t *testing.T) { return nil }, want: want{ - err: net.UnknownNetworkError("invalid"), + err: errors.ErrInvalidAddress(TCP.String(), ""), }, }, { @@ -1477,7 +1477,7 @@ func Test_dialer_dial(t *testing.T) { return nil }, want: want{ - err: net.UnknownNetworkError(""), + err: errors.ErrInvalidAddress(TCP.String(), ""), }, }, } @@ -1516,7 +1516,8 @@ func Test_dialer_dial(t *testing.T) { func Test_dialer_cacheExpireHook(t *testing.T) { t.Parallel() type args struct { - addr string + addr string + cache *dialerCache } type want struct{} type test struct { @@ -1585,7 +1586,7 @@ func Test_dialer_cacheExpireHook(t *testing.T) { test.beforeFunc(d) } - d.cacheExpireHook(ctx, test.args.addr) + d.cacheExpireHook(ctx, test.args.addr, test.args.cache) if err := checkFunc(d); err != nil { tt.Errorf("error = %v", err) } diff --git a/internal/net/grpc/client.go b/internal/net/grpc/client.go index 806245341d..78fc51448b 100644 --- a/internal/net/grpc/client.go +++ b/internal/net/grpc/client.go @@ -19,7 +19,9 @@ package grpc import ( "context" + "maps" "math" + "slices" "sync/atomic" "time" @@ -87,30 +89,32 @@ type Client interface { GetDialOption() []DialOption GetCallOption() []CallOption GetBackoff() backoff.Backoff + SetDisableResolveDNSAddr(addr string, disabled bool) ConnectedAddrs() []string Close(ctx context.Context) error } type gRPCClient struct { - addrs map[string]struct{} - poolSize uint64 - clientCount uint64 - conns sync.Map[string, pool.Conn] - hcDur time.Duration - prDur time.Duration - dialer net.Dialer - enablePoolRebalance bool - resolveDNS bool - dopts []DialOption - copts []CallOption - roccd string // reconnection old connection closing duration - eg errgroup.Group - bo backoff.Backoff - cb circuitbreaker.CircuitBreaker - gbo gbackoff.Config // grpc's original backoff configuration - mcd time.Duration // minimum connection timeout duration - group singleflight.Group[pool.Conn] - crl sync.Map[string, bool] // connection request list + addrs map[string]struct{} + poolSize uint64 + clientCount uint64 + conns sync.Map[string, pool.Conn] + hcDur time.Duration + prDur time.Duration + dialer net.Dialer + enablePoolRebalance bool + disableResolveDNSAddrs sync.Map[string, bool] + resolveDNS bool + dopts []DialOption + copts []CallOption + roccd string // reconnection old connection closing duration + eg errgroup.Group + bo backoff.Backoff + cb circuitbreaker.CircuitBreaker + gbo gbackoff.Config // grpc's original backoff configuration + mcd time.Duration // minimum connection timeout duration + group singleflight.Group[pool.Conn] + crl sync.Map[string, bool] // connection request list ech <-chan error monitorRunning atomic.Bool @@ -142,6 +146,9 @@ func New(opts ...Option) (c Client) { MinConnectTimeout: g.mcd, }, )) + if g.copts != nil && len(g.copts) != 0 { + g.dopts = append(g.dopts, grpc.WithDefaultCallOptions(g.copts...)) + } g.monitorRunning.Store(false) return g } @@ -153,11 +160,7 @@ func (g *gRPCClient) StartConnectionMonitor(ctx context.Context) (<-chan error, } g.monitorRunning.Store(true) - addrs := make([]string, len(g.addrs)) - for addr := range g.addrs { - addrs = append(addrs, addr) - } - + addrs := slices.Collect(maps.Keys(g.addrs)) if g.dialer != nil { g.dialer.StartDialerCache(ctx) } @@ 
-165,7 +168,7 @@ func (g *gRPCClient) StartConnectionMonitor(ctx context.Context) (<-chan error, ech := make(chan error, len(addrs)) for _, addr := range addrs { if addr != "" { - _, err := g.Connect(ctx, addr, grpc.WithBlock()) + _, err := g.Connect(ctx, addr) if err != nil { if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) && @@ -946,6 +949,12 @@ func (g *gRPCClient) GetBackoff() backoff.Backoff { return g.bo } +func (g *gRPCClient) SetDisableResolveDNSAddr(addr string, disabled bool) { + // NOTE: When connecting to multiple locations, it was necessary to switch dynamically, so implementation was added. + // There is no setting for disable on the helm chart side, so I used this implementation. + g.disableResolveDNSAddrs.Store(addr, disabled) +} + func (g *gRPCClient) Connect( ctx context.Context, addr string, dopts ...DialOption, ) (conn pool.Conn, err error) { @@ -975,7 +984,13 @@ func (g *gRPCClient) Connect( pool.WithAddr(addr), pool.WithSize(g.poolSize), pool.WithDialOptions(append(g.dopts, dopts...)...), - pool.WithResolveDNS(g.resolveDNS), + pool.WithResolveDNS(func() bool { + disabled, ok := g.disableResolveDNSAddrs.Load(addr) + if ok && disabled { + return false + } + return g.resolveDNS + }()), } if g.bo != nil { opts = append(opts, pool.WithBackoff(g.bo)) diff --git a/internal/net/grpc/client_test.go b/internal/net/grpc/client_test.go index 394c240818..0d073ce858 100644 --- a/internal/net/grpc/client_test.go +++ b/internal/net/grpc/client_test.go @@ -109,28 +109,29 @@ package grpc // ctx context.Context // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // want <-chan error @@ -171,6 +172,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -214,6 +216,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -258,28 +261,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: 
test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // got, err := g.StartConnectionMonitor(test.args.ctx) @@ -296,28 +300,29 @@ package grpc // f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -355,6 +360,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -399,6 +405,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -443,28 +450,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: 
test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.Range(test.args.ctx, test.args.f) @@ -482,28 +490,29 @@ package grpc // f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -542,6 +551,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -587,6 +597,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -631,28 +642,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: 
test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.RangeConcurrent(test.args.ctx, test.args.concurrency, test.args.f) @@ -670,28 +682,29 @@ package grpc // f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -730,6 +743,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -775,6 +789,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -819,28 +834,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: 
test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.OrderedRange(test.args.ctx, test.args.orders, test.args.f) @@ -859,28 +875,29 @@ package grpc // f func(ctx context.Context, addr string, conn *ClientConn, copts ...CallOption) error // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -920,6 +937,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -966,6 +984,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1010,28 +1029,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// 
conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.OrderedRangeConcurrent(test.args.ctx, test.args.orders, test.args.concurrency, test.args.f) @@ -1048,28 +1068,29 @@ package grpc // f func(ctx context.Context, conn *ClientConn, copts ...CallOption) (any, error) // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // wantData any @@ -1111,6 +1132,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1155,6 +1177,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1199,28 +1222,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: 
test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // gotData, err := g.RoundRobin(test.args.ctx, test.args.f) @@ -1238,28 +1262,29 @@ package grpc // f func(ctx context.Context, conn *ClientConn, copts ...CallOption) (any, error) // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // wantData any @@ -1302,6 +1327,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1347,6 +1373,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1391,28 +1418,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// 
resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // gotData, err := g.Do(test.args.ctx, test.args.addr, test.args.f) @@ -1432,28 +1460,29 @@ package grpc // f func(ctx context.Context, conn *ClientConn, copts ...CallOption) (any, error) // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // wantData any @@ -1498,6 +1527,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1545,6 +1575,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1589,28 +1620,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: 
test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // gotData, err := g.connectWithBackoff(test.args.ctx, test.args.p, test.args.addr, test.args.enableBackoff, test.args.f) @@ -1623,28 +1655,29 @@ package grpc // // func Test_gRPCClient_GetDialOption(t *testing.T) { // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // want []DialOption @@ -1677,6 +1710,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1717,6 +1751,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1761,28 +1796,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: 
test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // got := g.GetDialOption() @@ -1795,28 +1831,29 @@ package grpc // // func Test_gRPCClient_GetCallOption(t *testing.T) { // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // want []CallOption @@ -1849,6 +1886,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1889,6 +1927,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -1933,28 +1972,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // got := 
g.GetCallOption() @@ -1967,28 +2007,29 @@ package grpc // // func Test_gRPCClient_GetBackoff(t *testing.T) { // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // want backoff.Backoff @@ -2021,6 +2062,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2061,6 +2103,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2105,28 +2148,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // got := g.GetBackoff() @@ -2137,6 +2181,190 @@ package grpc // } // } // +// func Test_gRPCClient_SetDisableResolveDNSAddr(t *testing.T) { +// type args struct { +// addr string +// disabled bool +// } +// type 
fields struct { +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// addr:"", +// disabled:false, +// }, +// fields: fields { +// addrs:nil, +// poolSize:0, +// clientCount:0, +// conns:nil, +// hcDur:nil, +// prDur:nil, +// dialer:nil, +// enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, +// resolveDNS:false, +// dopts:nil, +// copts:nil, +// roccd:"", +// eg:nil, +// bo:nil, +// cb:nil, +// gbo:nil, +// mcd:nil, +// group:nil, +// crl:nil, +// ech:nil, +// monitorRunning:nil, +// stopMonitor:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// addr:"", +// disabled:false, +// }, +// fields: fields { +// addrs:nil, +// poolSize:0, +// clientCount:0, +// conns:nil, +// hcDur:nil, +// prDur:nil, +// dialer:nil, +// enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, +// resolveDNS:false, +// dopts:nil, +// copts:nil, +// roccd:"", +// eg:nil, +// bo:nil, +// cb:nil, +// gbo:nil, +// mcd:nil, +// group:nil, +// crl:nil, +// ech:nil, +// monitorRunning:nil, +// stopMonitor:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// g := &gRPCClient{ +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: 
test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, +// } +// +// g.SetDisableResolveDNSAddr(test.args.addr, test.args.disabled) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_gRPCClient_Connect(t *testing.T) { // type args struct { // ctx context.Context @@ -2144,28 +2372,29 @@ package grpc // dopts []DialOption // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // wantConn pool.Conn @@ -2208,6 +2437,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2253,6 +2483,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2297,28 +2528,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, 
+// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // gotConn, err := g.Connect(test.args.ctx, test.args.addr, test.args.dopts...) @@ -2335,28 +2567,29 @@ package grpc // addr string // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // want bool @@ -2394,6 +2627,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2438,6 +2672,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2482,28 +2717,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // got := 
g.IsConnected(test.args.ctx, test.args.addr) @@ -2520,28 +2756,29 @@ package grpc // addr string // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -2579,6 +2816,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2623,6 +2861,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2667,28 +2906,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.Disconnect(test.args.ctx, test.args.addr) @@ -2701,28 +2941,29 @@ package grpc // // func Test_gRPCClient_ConnectedAddrs(t *testing.T) { // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// 
clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // wantAddrs []string @@ -2755,6 +2996,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2795,6 +3037,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2839,28 +3082,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // gotAddrs := g.ConnectedAddrs() @@ -2876,28 +3120,29 @@ package grpc // ctx context.Context // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// 
roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech <-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -2934,6 +3179,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -2977,6 +3223,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -3021,28 +3268,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.Close(test.args.ctx) @@ -3058,28 +3306,29 @@ package grpc // fn func(addr string, p pool.Conn) bool // } // type fields struct { -// addrs map[string]struct{} -// poolSize uint64 -// clientCount uint64 -// conns sync.Map[string, pool.Conn] -// hcDur time.Duration -// prDur time.Duration -// dialer net.Dialer -// enablePoolRebalance bool -// resolveDNS bool -// dopts []DialOption -// copts []CallOption -// roccd string -// eg errgroup.Group -// bo backoff.Backoff -// cb circuitbreaker.CircuitBreaker -// gbo gbackoff.Config -// mcd time.Duration -// group singleflight.Group[pool.Conn] -// crl sync.Map[string, bool] -// ech 
<-chan error -// monitorRunning atomic.Bool -// stopMonitor context.CancelFunc +// addrs map[string]struct{} +// poolSize uint64 +// clientCount uint64 +// conns sync.Map[string, pool.Conn] +// hcDur time.Duration +// prDur time.Duration +// dialer net.Dialer +// enablePoolRebalance bool +// disableResolveDNSAddrs sync.Map[string, bool] +// resolveDNS bool +// dopts []DialOption +// copts []CallOption +// roccd string +// eg errgroup.Group +// bo backoff.Backoff +// cb circuitbreaker.CircuitBreaker +// gbo gbackoff.Config +// mcd time.Duration +// group singleflight.Group[pool.Conn] +// crl sync.Map[string, bool] +// ech <-chan error +// monitorRunning atomic.Bool +// stopMonitor context.CancelFunc // } // type want struct { // err error @@ -3116,6 +3365,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -3159,6 +3409,7 @@ package grpc // prDur:nil, // dialer:nil, // enablePoolRebalance:false, +// disableResolveDNSAddrs:nil, // resolveDNS:false, // dopts:nil, // copts:nil, @@ -3203,28 +3454,29 @@ package grpc // checkFunc = defaultCheckFunc // } // g := &gRPCClient{ -// addrs: test.fields.addrs, -// poolSize: test.fields.poolSize, -// clientCount: test.fields.clientCount, -// conns: test.fields.conns, -// hcDur: test.fields.hcDur, -// prDur: test.fields.prDur, -// dialer: test.fields.dialer, -// enablePoolRebalance: test.fields.enablePoolRebalance, -// resolveDNS: test.fields.resolveDNS, -// dopts: test.fields.dopts, -// copts: test.fields.copts, -// roccd: test.fields.roccd, -// eg: test.fields.eg, -// bo: test.fields.bo, -// cb: test.fields.cb, -// gbo: test.fields.gbo, -// mcd: test.fields.mcd, -// group: test.fields.group, -// crl: test.fields.crl, -// ech: test.fields.ech, -// monitorRunning: test.fields.monitorRunning, -// stopMonitor: test.fields.stopMonitor, +// addrs: test.fields.addrs, +// poolSize: test.fields.poolSize, +// clientCount: test.fields.clientCount, +// conns: test.fields.conns, +// hcDur: test.fields.hcDur, +// prDur: test.fields.prDur, +// dialer: test.fields.dialer, +// enablePoolRebalance: test.fields.enablePoolRebalance, +// disableResolveDNSAddrs: test.fields.disableResolveDNSAddrs, +// resolveDNS: test.fields.resolveDNS, +// dopts: test.fields.dopts, +// copts: test.fields.copts, +// roccd: test.fields.roccd, +// eg: test.fields.eg, +// bo: test.fields.bo, +// cb: test.fields.cb, +// gbo: test.fields.gbo, +// mcd: test.fields.mcd, +// group: test.fields.group, +// crl: test.fields.crl, +// ech: test.fields.ech, +// monitorRunning: test.fields.monitorRunning, +// stopMonitor: test.fields.stopMonitor, // } // // err := g.rangeConns(test.args.fn) diff --git a/internal/net/grpc/codes/codes.go b/internal/net/grpc/codes/codes.go index 39f6c79044..4a8626c7a2 100644 --- a/internal/net/grpc/codes/codes.go +++ b/internal/net/grpc/codes/codes.go @@ -40,3 +40,48 @@ var ( DataLoss = codes.DataLoss Unauthenticated = codes.Unauthenticated ) + +type CodeType interface { + int | int8 | int32 | int64 | uint | uint8 | uint32 | uint64 | Code +} + +func ToString[T CodeType](c T) string { + switch Code(c) { + case OK: + return "OK" + case Canceled: + return "Canceled" + case Unknown: + return "Unknown" + case InvalidArgument: + return "InvalidArgument" + case DeadlineExceeded: + return "DeadlineExceeded" + case NotFound: + return "NotFound" + case AlreadyExists: + return "AlreadyExists" + case PermissionDenied: + return "PermissionDenied" + case ResourceExhausted: + return 
"ResourceExhausted" + case FailedPrecondition: + return "FailedPrecondition" + case Aborted: + return "Aborted" + case OutOfRange: + return "OutOfRange" + case Unimplemented: + return "Unimplemented" + case Internal: + return "Internal" + case Unavailable: + return "Unavailable" + case DataLoss: + return "DataLoss" + case Unauthenticated: + return "Unauthenticated" + default: + return "InvalidStatus" + } +} diff --git a/internal/net/grpc/codes/codes_test.go b/internal/net/grpc/codes/codes_test.go new file mode 100644 index 0000000000..9c3feea2d2 --- /dev/null +++ b/internal/net/grpc/codes/codes_test.go @@ -0,0 +1,101 @@ +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package codes + +// NOT IMPLEMENTED BELOW +// +// func TestToString(t *testing.T) { +// type args struct { +// c T +// } +// type want struct { +// want string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got string) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// c:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := ToString(test.args.c) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/grpc/errdetails/errdetails.go b/internal/net/grpc/errdetails/errdetails.go index cb602f002e..1636f369b4 100644 --- a/internal/net/grpc/errdetails/errdetails.go +++ b/internal/net/grpc/errdetails/errdetails.go @@ -54,24 +54,25 @@ const ( ValdResourceOwner = "vdaas.org vald team " ValdGRPCResourceTypePrefix = "github.com/vdaas/vald/apis/grpc/v1" - typePrefix = "type.googleapis.com/google.rpc." + typePrefix = "type.googleapis.com/google.rpc." + typePrefixV1 = "type.googleapis.com/rpc.v1." 
) var ( - debugInfoMessageName = new(DebugInfo).ProtoReflect().Descriptor().FullName().Name() - errorInfoMessageName = new(ErrorInfo).ProtoReflect().Descriptor().FullName().Name() - badRequestMessageName = new(BadRequest).ProtoReflect().Descriptor().FullName().Name() - badRequestFieldViolationMessageName = new(BadRequestFieldViolation).ProtoReflect().Descriptor().FullName().Name() - localizedMessageMessageName = new(LocalizedMessage).ProtoReflect().Descriptor().FullName().Name() - preconditionFailureMessageName = new(PreconditionFailure).ProtoReflect().Descriptor().FullName().Name() - preconditionFailureViolationMessageName = new(PreconditionFailureViolation).ProtoReflect().Descriptor().FullName().Name() - helpMessageName = new(Help).ProtoReflect().Descriptor().FullName().Name() - helpLinkMessageName = new(HelpLink).ProtoReflect().Descriptor().FullName().Name() - quotaFailureMessageName = new(QuotaFailure).ProtoReflect().Descriptor().FullName().Name() - quotaFailureViolationMessageName = new(QuotaFailureViolation).ProtoReflect().Descriptor().FullName().Name() - requestInfoMessageName = new(RequestInfo).ProtoReflect().Descriptor().FullName().Name() - resourceInfoMessageName = new(ResourceInfo).ProtoReflect().Descriptor().FullName().Name() - retryInfoMessageName = new(RetryInfo).ProtoReflect().Descriptor().FullName().Name() + DebugInfoMessageName = string(new(DebugInfo).ProtoReflect().Descriptor().FullName().Name()) + ErrorInfoMessageName = string(new(ErrorInfo).ProtoReflect().Descriptor().FullName().Name()) + BadRequestMessageName = string(new(BadRequest).ProtoReflect().Descriptor().FullName().Name()) + BadRequestFieldViolationMessageName = string(new(BadRequestFieldViolation).ProtoReflect().Descriptor().FullName().Name()) + LocalizedMessageMessageName = string(new(LocalizedMessage).ProtoReflect().Descriptor().FullName().Name()) + PreconditionFailureMessageName = string(new(PreconditionFailure).ProtoReflect().Descriptor().FullName().Name()) + PreconditionFailureViolationMessageName = string(new(PreconditionFailureViolation).ProtoReflect().Descriptor().FullName().Name()) + HelpMessageName = string(new(Help).ProtoReflect().Descriptor().FullName().Name()) + HelpLinkMessageName = string(new(HelpLink).ProtoReflect().Descriptor().FullName().Name()) + QuotaFailureMessageName = string(new(QuotaFailure).ProtoReflect().Descriptor().FullName().Name()) + QuotaFailureViolationMessageName = string(new(QuotaFailureViolation).ProtoReflect().Descriptor().FullName().Name()) + RequestInfoMessageName = string(new(RequestInfo).ProtoReflect().Descriptor().FullName().Name()) + ResourceInfoMessageName = string(new(ResourceInfo).ProtoReflect().Descriptor().FullName().Name()) + RetryInfoMessageName = string(new(RetryInfo).ProtoReflect().Descriptor().FullName().Name()) ) type Detail struct { @@ -79,6 +80,85 @@ type Detail struct { Message proto.Message `json:"message,omitempty" yaml:"message"` } +func (d *Detail) MarshalJSON() (body []byte, err error) { + if d == nil { + return nil, nil + } + switch strings.TrimPrefix(strings.TrimPrefix(d.TypeURL, typePrefix), typePrefixV1) { + case DebugInfoMessageName: + m, ok := d.Message.(*DebugInfo) + if ok { + return json.Marshal(m) + } + case ErrorInfoMessageName: + m, ok := d.Message.(*ErrorInfo) + if ok { + return json.Marshal(m) + } + case BadRequestFieldViolationMessageName: + m, ok := d.Message.(*BadRequestFieldViolation) + if ok { + return json.Marshal(m) + } + case BadRequestMessageName: + m, ok := d.Message.(*BadRequest) + if ok { + return json.Marshal(m) + } + case 
LocalizedMessageMessageName: + m, ok := d.Message.(*LocalizedMessage) + if ok { + return json.Marshal(m) + } + case PreconditionFailureViolationMessageName: + m, ok := d.Message.(*PreconditionFailureViolation) + if ok { + return json.Marshal(m) + } + case PreconditionFailureMessageName: + m, ok := d.Message.(*PreconditionFailure) + if ok { + return json.Marshal(m) + } + case HelpLinkMessageName: + m, ok := d.Message.(*HelpLink) + if ok { + return json.Marshal(m) + } + case HelpMessageName: + m, ok := d.Message.(*Help) + if ok { + return json.Marshal(m) + } + case QuotaFailureViolationMessageName: + m, ok := d.Message.(*QuotaFailureViolation) + if ok { + return json.Marshal(m) + } + case QuotaFailureMessageName: + m, ok := d.Message.(*QuotaFailure) + if ok { + return json.Marshal(m) + } + case RequestInfoMessageName: + m, ok := d.Message.(*RequestInfo) + if ok { + return json.Marshal(m) + } + case ResourceInfoMessageName: + m, ok := d.Message.(*ResourceInfo) + if ok { + return json.Marshal(m) + } + case RetryInfoMessageName: + m, ok := d.Message.(*RetryInfo) + if ok { + return json.Marshal(m) + } + } + return json.Marshal(d) +} + func decodeDetails(objs ...any) (details []Detail) { if objs == nil { return nil @@ -234,86 +314,86 @@ func AnyToErrorDetail(a *types.Any) proto.Message { return nil } var err error - switch proto.Name(strings.TrimPrefix(a.GetTypeUrl(), typePrefix)) { - case debugInfoMessageName: + switch strings.TrimPrefix(strings.TrimPrefix(a.GetTypeUrl(), typePrefix), typePrefixV1) { + case DebugInfoMessageName: var m DebugInfo err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case errorInfoMessageName: + case ErrorInfoMessageName: var m ErrorInfo err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case badRequestFieldViolationMessageName: + case BadRequestFieldViolationMessageName: var m BadRequestFieldViolation err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case badRequestMessageName: + case BadRequestMessageName: var m BadRequest err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case localizedMessageMessageName: + case LocalizedMessageMessageName: var m LocalizedMessage err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case preconditionFailureViolationMessageName: + case PreconditionFailureViolationMessageName: var m PreconditionFailureViolation err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case preconditionFailureMessageName: + case PreconditionFailureMessageName: var m PreconditionFailure err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case helpLinkMessageName: + case HelpLinkMessageName: var m HelpLink err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case helpMessageName: + case HelpMessageName: var m Help err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case quotaFailureViolationMessageName: + case QuotaFailureViolationMessageName: var m QuotaFailureViolation err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case quotaFailureMessageName: + case QuotaFailureMessageName: var m QuotaFailure err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case requestInfoMessageName: + case RequestInfoMessageName: var m RequestInfo err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case resourceInfoMessageName: + case ResourceInfoMessageName: var m ResourceInfo err = types.UnmarshalAny(a, &m) if err == nil { return &m } - case retryInfoMessageName: + case RetryInfoMessageName: var m RetryInfo err = types.UnmarshalAny(a, &m) if err == nil { 
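Both AnyToErrorDetail and the new Detail.MarshalJSON dispatch on the message name obtained by stripping the google.rpc prefix and the rpc.v1 variant from the type URL. A small sketch of that normalisation and unpacking step, using the public genproto/anypb packages rather than the internal types aliases:

package main

import (
	"fmt"
	"strings"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/protobuf/types/known/anypb"
)

const (
	typePrefix   = "type.googleapis.com/google.rpc."
	typePrefixV1 = "type.googleapis.com/rpc.v1."
)

func main() {
	a, err := anypb.New(&errdetails.ErrorInfo{Reason: "example", Domain: "vdaas.org"})
	if err != nil {
		panic(err)
	}
	// Strip both prefixes so v1-style and google.rpc-style URLs dispatch identically.
	name := strings.TrimPrefix(strings.TrimPrefix(a.GetTypeUrl(), typePrefix), typePrefixV1)
	fmt.Println(name) // ErrorInfo

	switch name {
	case "ErrorInfo":
		var m errdetails.ErrorInfo
		if err := a.UnmarshalTo(&m); err == nil {
			fmt.Println(m.GetReason(), m.GetDomain())
		}
	}
}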
@@ -331,12 +411,7 @@ func DebugInfoFromInfoDetail(v *info.Detail) (debug *DebugInfo) { if v.StackTrace != nil { debug.StackEntries = make([]string, 0, len(v.StackTrace)) for i, stack := range v.StackTrace { - debug.StackEntries = append(debug.GetStackEntries(), strings.Join([]string{ - "id:", - strconv.Itoa(i), - "stack_trace:", - stack.String(), - }, " ")) + debug.StackEntries = append(debug.GetStackEntries(), "id: "+strconv.Itoa(i)+" stack_trace: "+stack.ShortString()) } v.StackTrace = nil } diff --git a/internal/net/grpc/errdetails/errdetails_test.go b/internal/net/grpc/errdetails/errdetails_test.go index f8bda84952..79b5261103 100644 --- a/internal/net/grpc/errdetails/errdetails_test.go +++ b/internal/net/grpc/errdetails/errdetails_test.go @@ -111,3 +111,99 @@ func TestDebugInfoFromInfoDetail(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestDetail_MarshalJSON(t *testing.T) { +// type fields struct { +// TypeURL string +// Message proto.Message +// } +// type want struct { +// wantBody []byte +// err error +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, []byte, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotBody []byte, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotBody, w.wantBody) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotBody, w.wantBody) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// TypeURL:"", +// Message:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// TypeURL:"", +// Message:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// d := &Detail{ +// TypeURL: test.fields.TypeURL, +// Message: test.fields.Message, +// } +// +// gotBody, err := d.MarshalJSON() +// if err := checkFunc(test.want, gotBody, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/grpc/health/health.go b/internal/net/grpc/health/health.go index a4a25f4969..b0a785a534 100644 --- a/internal/net/grpc/health/health.go +++ b/internal/net/grpc/health/health.go @@ -18,14 +18,19 @@ package health import ( + "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "google.golang.org/grpc/health" - "google.golang.org/grpc/health/grpc_health_v1" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) // Register register the generic gRPC health check server implementation to the srv. 
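The DebugInfoFromInfoDetail hunk above now flattens each stack frame into one "id: N stack_trace: ..." string built from the frame's ShortString. A sketch of the resulting entries, with a hypothetical frame type standing in for the internal info.StackTrace:

package main

import (
	"fmt"
	"strconv"
)

// frame is a made-up stand-in for info.StackTrace, which exposes a compact
// ShortString representation in the real package.
type frame struct {
	file     string
	funcName string
	line     int
}

func (f frame) ShortString() string {
	return f.file + ":" + strconv.Itoa(f.line) + " " + f.funcName
}

func main() {
	frames := []frame{{"pool.go", "Do", 571}, {"client.go", "Do", 123}}
	entries := make([]string, 0, len(frames))
	for i, st := range frames {
		// One flat string per frame keeps the DebugInfo stack entries compact.
		entries = append(entries, "id: "+strconv.Itoa(i)+" stack_trace: "+st.ShortString())
	}
	fmt.Println(entries)
}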
-func Register(name string, srv *grpc.Server) { - server := health.NewServer() - grpc_health_v1.RegisterHealthServer(srv, server) - server.SetServingStatus(name, grpc_health_v1.HealthCheckResponse_SERVING) +func Register(srv *grpc.Server) { + hsrv := health.NewServer() + healthpb.RegisterHealthServer(srv, hsrv) + for api := range srv.GetServiceInfo() { + hsrv.SetServingStatus(api, healthpb.HealthCheckResponse_SERVING) + log.Debug("gRPC health check server registered for service:\t" + api) + } + hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) } diff --git a/internal/net/grpc/health/health_test.go b/internal/net/grpc/health/health_test.go index bc9a90c11f..7aa2f78e42 100644 --- a/internal/net/grpc/health/health_test.go +++ b/internal/net/grpc/health/health_test.go @@ -23,6 +23,7 @@ import ( "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/test/goleak" + healthpb "google.golang.org/grpc/health/grpc_health_v1" ) func TestMain(m *testing.M) { @@ -32,8 +33,7 @@ func TestMain(m *testing.M) { func TestRegister(t *testing.T) { t.Parallel() type args struct { - name string - srv *grpc.Server + srv *grpc.Server } type want struct{} type test struct { @@ -53,11 +53,10 @@ func TestRegister(t *testing.T) { return test{ name: "success to register the health check server", args: args{ - name: "api health check", - srv: srv, + srv: srv, }, checkFunc: func(w want) error { - if _, ok := srv.GetServiceInfo()["grpc.health.v1.Health"]; !ok { + if _, ok := srv.GetServiceInfo()[healthpb.Health_ServiceDesc.ServiceName]; !ok { return errors.New("health check server not registered") } @@ -81,7 +80,7 @@ func TestRegister(t *testing.T) { test.checkFunc = defaultCheckFunc } - Register(test.args.name, test.args.srv) + Register(test.args.srv) if err := test.checkFunc(test.want); err != nil { tt.Errorf("error = %v", err) } diff --git a/internal/net/grpc/interceptor/client/metric/metric.go b/internal/net/grpc/interceptor/client/metric/metric.go index 62bef184c0..d4c9c032db 100644 --- a/internal/net/grpc/interceptor/client/metric/metric.go +++ b/internal/net/grpc/interceptor/client/metric/metric.go @@ -40,7 +40,7 @@ const ( func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Client latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -60,7 +60,7 @@ func ClientMetricInterceptors() (grpc.UnaryClientInterceptor, grpc.StreamClientI record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { diff --git a/internal/net/grpc/interceptor/server/logging/accesslog.go b/internal/net/grpc/interceptor/server/logging/accesslog.go index 85964aa70e..c2878ab100 100644 --- a/internal/net/grpc/interceptor/server/logging/accesslog.go +++ b/internal/net/grpc/interceptor/server/logging/accesslog.go @@ -19,11 +19,14 @@ package logging import ( "context" + "fmt" "path" "time" + 
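Register no longer takes a service name: it marks every service already registered on the server as SERVING and also sets the empty service name used by generic probes. A standalone sketch of the same pattern with plain grpc-go (the internal grpc.Server wrapper and log package are not used here):

package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/health"
	healthpb "google.golang.org/grpc/health/grpc_health_v1"
)

// registerHealth marks every service registered on srv as SERVING, plus the
// empty service name that generic health probes query by default.
func registerHealth(srv *grpc.Server) {
	hsrv := health.NewServer()
	healthpb.RegisterHealthServer(srv, hsrv)
	for name := range srv.GetServiceInfo() {
		hsrv.SetServingStatus(name, healthpb.HealthCheckResponse_SERVING)
	}
	hsrv.SetServingStatus("", healthpb.HealthCheckResponse_SERVING)
}

func main() {
	srv := grpc.NewServer()
	// Register application services here first so GetServiceInfo already
	// contains them, then attach the health server.
	registerHealth(srv)
}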
"github.com/vdaas/vald/internal/encoding/json" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" ) @@ -32,6 +35,7 @@ const ( grpcKindStream = "stream" rpcCompletedMessage = "rpc completed" + rpcFailedMessage = "rpc failed" ) type AccessLogEntity struct { @@ -49,6 +53,29 @@ type AccessLogGRPCEntity struct { Method string `json:"method,omitempty" yaml:"method"` } +func (e AccessLogEntity) String() (str string) { + var emsg string + if e.Error != nil { + st, ok := status.FromError(e.Error) + if ok && st != nil { + emsg = st.String() + } else { + emsg = e.Error.Error() + } + } + eb, err := json.Marshal(e) + if err != nil { + str = fmt.Sprintf("%#v,\tfailed to json.Marshal(AccessLogEntity) error: %v", e, err) + } else { + str = string(eb) + } + + if emsg != "" { + return str + ",\terror message: " + emsg + } + return str +} + func AccessLogInterceptor() grpc.UnaryServerInterceptor { return func( ctx context.Context, @@ -88,9 +115,9 @@ func AccessLogInterceptor() grpc.UnaryServerInterceptor { if err != nil { entity.Error = err - log.Warn(rpcCompletedMessage, entity) + log.Warn(rpcFailedMessage, entity.String()) } else { - log.Debug(rpcCompletedMessage, entity) + log.Debug(rpcCompletedMessage, entity.String()) } return resp, err @@ -136,9 +163,9 @@ func AccessLogStreamInterceptor() grpc.StreamServerInterceptor { if err != nil { entity.Error = err - log.Warn(rpcCompletedMessage, entity) + log.Warn(rpcFailedMessage, entity.String()) } else { - log.Debug(rpcCompletedMessage, entity) + log.Debug(rpcCompletedMessage, entity.String()) } return err diff --git a/internal/net/grpc/interceptor/server/logging/accesslog_test.go b/internal/net/grpc/interceptor/server/logging/accesslog_test.go index 8649ede1c8..b4ef22fcaa 100644 --- a/internal/net/grpc/interceptor/server/logging/accesslog_test.go +++ b/internal/net/grpc/interceptor/server/logging/accesslog_test.go @@ -29,6 +29,114 @@ func TestMain(m *testing.M) { // NOT IMPLEMENTED BELOW // +// func TestAccessLogEntity_String(t *testing.T) { +// type fields struct { +// GRPC *AccessLogGRPCEntity +// StartTime int64 +// EndTime int64 +// Latency int64 +// TraceID string +// Error error +// } +// type want struct { +// wantStr string +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, string) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotStr string) error { +// if !reflect.DeepEqual(gotStr, w.wantStr) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotStr, w.wantStr) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// GRPC:AccessLogGRPCEntity{}, +// StartTime:0, +// EndTime:0, +// Latency:0, +// TraceID:"", +// Error:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// GRPC:AccessLogGRPCEntity{}, +// StartTime:0, +// EndTime:0, +// Latency:0, +// TraceID:"", +// Error:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// 
}(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// e := AccessLogEntity{ +// GRPC: test.fields.GRPC, +// StartTime: test.fields.StartTime, +// EndTime: test.fields.EndTime, +// Latency: test.fields.Latency, +// TraceID: test.fields.TraceID, +// Error: test.fields.Error, +// } +// +// gotStr := e.String() +// if err := checkFunc(test.want, gotStr); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestAccessLogInterceptor(t *testing.T) { // type want struct { // want grpc.UnaryServerInterceptor diff --git a/internal/net/grpc/interceptor/server/metric/metric.go b/internal/net/grpc/interceptor/server/metric/metric.go index fb1366e23a..408fb9668d 100644 --- a/internal/net/grpc/interceptor/server/metric/metric.go +++ b/internal/net/grpc/interceptor/server/metric/metric.go @@ -36,7 +36,7 @@ const ( func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterceptor, error) { meter := metrics.GetMeter() - latencyHistgram, err := meter.Float64Histogram( + latencyHistogram, err := meter.Float64Histogram( latencyMetricsName, metrics.WithDescription("Server latency in milliseconds, by method"), metrics.WithUnit(metrics.Milliseconds), @@ -56,7 +56,7 @@ func MetricInterceptors() (grpc.UnaryServerInterceptor, grpc.StreamServerInterce record := func(ctx context.Context, method string, err error, latency float64) { attrs := attributesFromError(method, err) - latencyHistgram.Record(ctx, latency, metrics.WithAttributes(attrs...)) + latencyHistogram.Record(ctx, latency, metrics.WithAttributes(attrs...)) completedRPCCnt.Add(ctx, 1, metrics.WithAttributes(attrs...)) } return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { diff --git a/internal/net/grpc/option.go b/internal/net/grpc/option.go index 7bf0f99c2a..b73514f559 100644 --- a/internal/net/grpc/option.go +++ b/internal/net/grpc/option.go @@ -124,16 +124,6 @@ func WithConnectionPoolSize(size int) Option { } } -func WithDialOptions(opts ...grpc.DialOption) Option { - return func(g *gRPCClient) { - if g.dopts != nil && len(g.dopts) > 0 { - g.dopts = append(g.dopts, opts...) - } else { - g.dopts = opts - } - } -} - func WithBackoffMaxDelay(dur string) Option { return func(g *gRPCClient) { if len(dur) == 0 { @@ -200,16 +190,6 @@ func WithMinConnectTimeout(dur string) Option { } } -func WithCallOptions(opts ...grpc.CallOption) Option { - return func(g *gRPCClient) { - if g.copts != nil && len(g.copts) > 0 { - g.copts = append(g.copts, opts...) - } else { - g.copts = opts - } - } -} - func WithErrGroup(eg errgroup.Group) Option { return func(g *gRPCClient) { if eg != nil { @@ -234,30 +214,61 @@ func WithCircuitBreaker(cb circuitbreaker.CircuitBreaker) Option { } } -func WithWaitForReady(flg bool) Option { +/* +API References https://pkg.go.dev/google.golang.org/grpc#CallOption + +1. Already Implemented APIs +- func CallContentSubtype(contentSubtype string) CallOption +- func MaxCallRecvMsgSize(bytes int) CallOption +- func MaxCallSendMsgSize(bytes int) CallOption +- func MaxRetryRPCBufferSize(bytes int) CallOption +- func WaitForReady(waitForReady bool) CallOption + +2. 
Unnecessary for this package APIs +- func Header(md *metadata.MD) CallOption +- func Peer(p *peer.Peer) CallOption +- func PerRPCCredentials(creds credentials.PerRPCCredentials) CallOption +- func StaticMethod() CallOption +- func Trailer(md *metadata.MD) CallOption + +3. Experimental APIs +- func ForceCodec(codec encoding.Codec) CallOption +- func ForceCodecV2(codec encoding.CodecV2) CallOption +- func OnFinish(onFinish func(err error)) CallOption +- func UseCompressor(name string) CallOption + +4. Deprecated APIs +- func CallCustomCodec(codec Codec) CallOption +- func FailFast(failFast bool) CallOption +*/ +const defaultCallOptionLength = 5 + +func WithCallOptions(opts ...grpc.CallOption) Option { return func(g *gRPCClient) { - g.copts = append(g.copts, - grpc.WaitForReady(flg), - ) + if g.copts != nil && len(g.copts) > 0 { + g.copts = append(g.copts, opts...) + } else { + g.copts = opts + } } } -func WithMaxRetryRPCBufferSize(size int) Option { +func WithCallContentSubtype(contentSubtype string) Option { return func(g *gRPCClient) { - if size > 1 { - g.copts = append(g.copts, - grpc.MaxRetryRPCBufferSize(size), - ) + if g.copts == nil && cap(g.copts) == 0 { + g.copts = make([]grpc.CallOption, 0, defaultCallOptionLength) } + g.copts = append(g.copts, grpc.CallContentSubtype(contentSubtype)) } } func WithMaxRecvMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { - g.copts = append(g.copts, - grpc.MaxCallRecvMsgSize(size), - ) + if g.copts == nil && cap(g.copts) == 0 { + g.copts = make([]grpc.CallOption, 0, defaultCallOptionLength) + } + g.copts = append(g.copts, grpc.MaxCallRecvMsgSize(size)) } } } @@ -265,9 +276,95 @@ func WithMaxRecvMsgSize(size int) Option { func WithMaxSendMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { - g.copts = append(g.copts, - grpc.MaxCallSendMsgSize(size), - ) + if g.copts == nil && cap(g.copts) == 0 { + g.copts = make([]grpc.CallOption, 0, defaultCallOptionLength) + } + g.copts = append(g.copts, grpc.MaxCallSendMsgSize(size)) + } + } +} + +func WithMaxRetryRPCBufferSize(size int) Option { + return func(g *gRPCClient) { + if size > 1 { + if g.copts == nil && cap(g.copts) == 0 { + g.copts = make([]grpc.CallOption, 0, defaultCallOptionLength) + } + g.copts = append(g.copts, grpc.MaxRetryRPCBufferSize(size)) + } + } +} + +func WithWaitForReady(flg bool) Option { + return func(g *gRPCClient) { + if g.copts == nil && cap(g.copts) == 0 { + g.copts = make([]grpc.CallOption, 0, defaultCallOptionLength) + } + g.copts = append(g.copts, grpc.WaitForReady(flg)) + } +} + +/* +API References https://pkg.go.dev/google.golang.org/grpc#DialOption + +1. Already Implemented APIs +- func WithAuthority(a string) DialOption +- func WithContextDialer(f func(context.Context, string) (net.Conn, error)) DialOption +- func WithDisableRetry() DialOption +- func WithIdleTimeout(d time.Duration) DialOption +- func WithInitialConnWindowSize(s int32) DialOption +- func WithInitialWindowSize(s int32) DialOption +- func WithKeepaliveParams(kp keepalive.ClientParameters) DialOption +- func WithMaxCallAttempts(n int) DialOption +- func WithMaxHeaderListSize(s uint32) DialOption +- func WithReadBufferSize(s int) DialOption +- func WithSharedWriteBuffer(val bool) DialOption +- func WithTransportCredentials(creds credentials.TransportCredentials) DialOption +- func WithUserAgent(s string) DialOption +- func WithWriteBufferSize(s int) DialOption + +2. 
Unnecessary for this package APIs +- func WithChainStreamInterceptor(interceptors ...StreamClientInterceptor) DialOption +- func WithChainUnaryInterceptor(interceptors ...UnaryClientInterceptor) DialOption +- func WithConnectParams(p ConnectParams) DialOption +- func WithDefaultCallOptions(cos ...CallOption) DialOption +- func WithDefaultServiceConfig(s string) DialOption +- func WithDisableServiceConfig() DialOption +- func WithPerRPCCredentials(creds credentials.PerRPCCredentials) DialOption +- func WithStatsHandler(h stats.Handler) DialOption +- func WithStreamInterceptor(f StreamClientInterceptor) DialOption +- func WithUnaryInterceptor(f UnaryClientInterceptor) DialOption + +3. Experimental APIs +- func WithChannelzParentID(c channelz.Identifier) DialOption +- func WithCredentialsBundle(b credentials.Bundle) DialOption +- func WithDisableHealthCheck() DialOption +- func WithNoProxy() DialOption +- func WithResolvers(rs ...resolver.Builder) DialOption + +4. Deprecated APIs +- func FailOnNonTempDialError(f bool) DialOption +- func WithBackoffConfig(b BackoffConfig) DialOption +- func WithBackoffMaxDelay(md time.Duration) DialOption +- func WithBlock() DialOption +- func WithCodec(c Codec) DialOption +- func WithCompressor(cp Compressor) DialOption +- func WithDecompressor(dc Decompressor) DialOption +- func WithDialer(f func(string, time.Duration) (net.Conn, error)) DialOption +- func WithInsecure() DialOption +- func WithMaxMsgSize(s int) DialOption +- func WithReturnConnectionError() DialOption +- func WithTimeout(d time.Duration) DialOption +*/ + +const defaultDialOptionLength = 14 + +func WithDialOptions(opts ...grpc.DialOption) Option { + return func(g *gRPCClient) { + if g.dopts != nil && len(g.dopts) > 0 { + g.dopts = append(g.dopts, opts...) 
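The reworked call-option and dial-option setters all share the same lazy pre-allocation pattern: the first option to run allocates the slice with the documented default capacity, later options simply append. A sketch with a hypothetical client/option pair standing in for the internal gRPCClient and Option types:

package main

import (
	"fmt"

	"google.golang.org/grpc"
)

const defaultCallOptionLength = 5

type client struct {
	copts []grpc.CallOption
}

type option func(*client)

// withWaitForReady pre-allocates the call-option slice on first use so the
// repeated appends from later options do not keep re-growing it.
func withWaitForReady(flg bool) option {
	return func(c *client) {
		if c.copts == nil && cap(c.copts) == 0 {
			c.copts = make([]grpc.CallOption, 0, defaultCallOptionLength)
		}
		c.copts = append(c.copts, grpc.WaitForReady(flg))
	}
}

func withMaxRecvMsgSize(size int) option {
	return func(c *client) {
		if size > 1 {
			if c.copts == nil && cap(c.copts) == 0 {
				c.copts = make([]grpc.CallOption, 0, defaultCallOptionLength)
			}
			c.copts = append(c.copts, grpc.MaxCallRecvMsgSize(size))
		}
	}
}

func main() {
	c := new(client)
	for _, o := range []option{withWaitForReady(true), withMaxRecvMsgSize(4 << 20)} {
		o(c)
	}
	fmt.Println(len(c.copts), cap(c.copts)) // 2 5
}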
+ } else { + g.dopts = opts } } } @@ -275,6 +372,9 @@ func WithMaxSendMsgSize(size int) Option { func WithWriteBufferSize(size int) Option { return func(g *gRPCClient) { if size > 1 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithWriteBufferSize(size), ) @@ -285,6 +385,9 @@ func WithWriteBufferSize(size int) Option { func WithReadBufferSize(size int) Option { return func(g *gRPCClient) { if size > 1 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithReadBufferSize(size), ) @@ -292,21 +395,27 @@ func WithReadBufferSize(size int) Option { } } -func WithInitialWindowSize(size int) Option { +func WithInitialWindowSize(size int32) Option { return func(g *gRPCClient) { if size > 1 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, - grpc.WithInitialWindowSize(int32(size)), + grpc.WithInitialWindowSize(size), ) } } } -func WithInitialConnectionWindowSize(size int) Option { +func WithInitialConnectionWindowSize(size int32) Option { return func(g *gRPCClient) { if size > 1 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, - grpc.WithInitialConnWindowSize(int32(size)), + grpc.WithInitialConnWindowSize(size), ) } } @@ -315,6 +424,9 @@ func WithInitialConnectionWindowSize(size int) Option { func WithMaxMsgSize(size int) Option { return func(g *gRPCClient) { if size > 1 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(size)), ) @@ -325,6 +437,9 @@ func WithMaxMsgSize(size int) Option { func WithInsecure(flg bool) Option { return func(g *gRPCClient) { if flg { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithTransportCredentials(insecure.NewCredentials()), ) @@ -339,22 +454,25 @@ func WithKeepaliveParams(t, to string, permitWithoutStream bool) Option { } td, err := timeutil.Parse(t) if err != nil { - log.Errorf("failed to parse grpc keepalive time: %v", err) + log.Errorf("failed to parse grpc keepalive time: %s,\t%v", t, err) return } if td <= 0 { log.Errorf("invalid grpc keepalive time: %d", td) return } - tod, err := timeutil.Parse(t) + tod, err := timeutil.Parse(to) if err != nil { - log.Errorf("failed to parse grpc keepalive timeout: %v", err) + log.Errorf("failed to parse grpc keepalive timeout: %s,\t%v", to, err) return } if tod <= 0 { log.Errorf("invalid grpc keepalive timeout: %d", tod) return } + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithKeepaliveParams( keepalive.ClientParameters{ @@ -367,15 +485,26 @@ func WithKeepaliveParams(t, to string, permitWithoutStream bool) Option { } } -func WithDialer(der net.Dialer) Option { +func WithDialer(network string, der net.Dialer) Option { return func(g *gRPCClient) { if der != nil { g.dialer = der + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + nt := net.NetworkTypeFromString(network) + switch nt { + case net.UDP, net.UDP4, net.UDP6: + nt = net.UDP + case net.UNIX, net.UNIXGRAM, 
net.UNIXPACKET: + nt = net.UNIX + default: + nt = net.TCP + } g.dopts = append(g.dopts, grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { - // TODO we need change network type dynamically - log.Debugf("gRPC context Dialer addr is %s", addr) - return der.GetDialer()(ctx, net.TCP.String(), addr) + log.Debugf("gRPC context Dialer for network %s, addr is %s", nt.String(), addr) + return g.dialer.GetDialer()(ctx, nt.String(), addr) }), ) } @@ -385,6 +514,9 @@ func WithDialer(der net.Dialer) Option { func WithTLSConfig(cfg *tls.Config) Option { return func(g *gRPCClient) { if cfg != nil { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } g.dopts = append(g.dopts, grpc.WithTransportCredentials(credentials.NewTLS(cfg)), ) @@ -392,8 +524,112 @@ func WithTLSConfig(cfg *tls.Config) Option { } } +func WithAuthority(a string) Option { + return func(g *gRPCClient) { + if a != "" { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithAuthority(a), + ) + } + } +} + +func WithDisableRetry(disable bool) Option { + return func(g *gRPCClient) { + if disable { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithDisableRetry(), + ) + } + } +} + +func WithIdleTimeout(dur string) Option { + return func(g *gRPCClient) { + if len(dur) == 0 { + return + } + d, err := timeutil.Parse(dur) + if err != nil { + log.Errorf("failed to parse idle timeout duration: %v", err) + return + } + if d <= 0 { + log.Errorf("invalid idle timeout duration: %d", d) + return + } + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithIdleTimeout(d), + ) + } +} + +func WithMaxCallAttempts(n int) Option { + return func(g *gRPCClient) { + if n > 2 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithMaxCallAttempts(n), + ) + } + } +} + +func WithMaxHeaderListSize(size uint32) Option { + return func(g *gRPCClient) { + if size > 0 { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithMaxHeaderListSize(size), + ) + } + } +} + +func WithSharedWriteBuffer(enable bool) Option { + return func(g *gRPCClient) { + if enable { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithSharedWriteBuffer(enable), + ) + } + } +} + +func WithUserAgent(ua string) Option { + return func(g *gRPCClient) { + if ua != "" { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } + g.dopts = append(g.dopts, + grpc.WithUserAgent(ua), + ) + } + } +} + func WithClientInterceptors(names ...string) Option { return func(g *gRPCClient) { + if g.dopts == nil && cap(g.dopts) == 0 { + g.dopts = make([]grpc.DialOption, 0, defaultDialOptionLength) + } for _, name := range names { switch strings.ToLower(name) { case "traceinterceptor", "trace": diff --git a/internal/net/grpc/pool/pool.go b/internal/net/grpc/pool/pool.go index 17e1859ee8..1af0edb867 100644 --- a/internal/net/grpc/pool/pool.go +++ b/internal/net/grpc/pool/pool.go @@ -127,12 +127,12 @@ func New(ctx 
context.Context, opts ...Option) (c Conn, err error) { p.addr = net.JoinHostPort(p.host, p.port) } - conn, err := grpc.DialContext(ctx, p.addr, p.dopts...) + conn, err := grpc.NewClient(p.addr, p.dopts...) if err != nil { log.Warnf("grpc.New initial Dial check to %s returned error: %v", p.addr, err) if conn != nil { err = conn.Close() - if err != nil && !errors.Is(err, grpc.ErrClientConnClosing) { + if err != nil { log.Warn("failed to close connection:", err) } } @@ -143,11 +143,11 @@ func New(ctx context.Context, opts ...Option) (c Conn, err error) { } p.port = port p.addr = net.JoinHostPort(p.host, p.port) - conn, err = grpc.DialContext(ctx, p.addr, p.dopts...) + conn, err = grpc.NewClient(p.addr, p.dopts...) if err != nil { if conn != nil { cerr := conn.Close() - if cerr != nil && !errors.Is(cerr, grpc.ErrClientConnClosing) { + if cerr != nil { return nil, errors.Join(err, cerr) } } @@ -156,7 +156,7 @@ func New(ctx context.Context, opts ...Option) (c Conn, err error) { } if conn != nil { err = conn.Close() - if err != nil && !errors.Is(err, grpc.ErrClientConnClosing) { + if err != nil { return nil, err } } @@ -445,7 +445,7 @@ func (p *pool) Disconnect() (err error) { err = p.loop(ctx, func(ctx context.Context, _ int, pc *poolConn) bool { if pc != nil && pc.conn != nil { ierr := pc.conn.Close() - if ierr != nil && !errors.Is(ierr, grpc.ErrClientConnClosing) { + if ierr != nil { if !errors.Is(ierr, context.DeadlineExceeded) && !errors.Is(ierr, context.Canceled) { log.Debugf("failed to close connection pool addr = %s\terror = %v", pc.addr, ierr) @@ -469,11 +469,11 @@ func (p *pool) dial(ctx context.Context, addr string) (conn *ClientConn, err err do := func() (conn *ClientConn, err error) { ctx, cancel := context.WithTimeout(ctx, p.dialTimeout) defer cancel() - conn, err = grpc.DialContext(ctx, addr, append(p.dopts, grpc.WithBlock())...) + conn, err = grpc.NewClient(addr, p.dopts...) 
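The pool now builds connections with grpc.NewClient instead of the deprecated grpc.DialContext, so construction no longer blocks on the dial. A minimal usage sketch; the target address is arbitrary and eager connection is an explicit opt-in:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// grpc.NewClient only validates the target and options; the actual
	// connection is established lazily on the first RPC (or explicitly
	// via conn.Connect()).
	conn, err := grpc.NewClient("localhost:8081",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}
	defer conn.Close()

	conn.Connect() // roughly what the removed grpc.WithBlock usage used to imply
	log.Println("initial state:", conn.GetState())
}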
if err != nil { if conn != nil { cerr := conn.Close() - if cerr != nil && !errors.Is(cerr, grpc.ErrClientConnClosing) { + if cerr != nil { err = errors.Join(err, cerr) } } @@ -483,7 +483,7 @@ func (p *pool) dial(ctx context.Context, addr string) (conn *ClientConn, err err if !isHealthy(ctx, conn) { if conn != nil { err = conn.Close() - if err != nil && !errors.Is(err, grpc.ErrClientConnClosing) { + if err != nil { err = errors.Join(errors.ErrGRPCClientConnNotFound(addr), err) } else { err = errors.ErrGRPCClientConnNotFound(addr) @@ -568,28 +568,50 @@ func (p *pool) IsHealthy(ctx context.Context) (healthy bool) { return unhealthy < pl } -func (p *pool) Do(ctx context.Context, f func(conn *ClientConn) error) error { +func (p *pool) Do(ctx context.Context, f func(conn *ClientConn) error) (err error) { if p == nil { return errors.ErrGRPCClientConnNotFound("*") } - conn, ok := p.Get(ctx) + idx, conn, ok := p.getHealthyConn(ctx, 0, p.Len()) if !ok || conn == nil { return errors.ErrGRPCClientConnNotFound(p.addr) } - return f(conn) + err = f(conn) + if errors.Is(err, grpc.ErrClientConnClosing) { + if conn != nil { + if cerr := conn.Close(); cerr != nil && !errors.Is(cerr, grpc.ErrClientConnClosing) { + log.Warnf("Failed to close connection: %v", cerr) + } + } + conn, err = p.dial(ctx, p.addr) + if err == nil && conn != nil && isHealthy(ctx, conn) { + p.store(idx, &poolConn{ + conn: conn, + addr: p.addr, + }) + if newErr := f(conn); newErr != nil { + return errors.Join(err, newErr) + } + return nil + } + } + return err } -func (p *pool) Get(ctx context.Context) (*ClientConn, bool) { - return p.getHealthyConn(ctx, 0, p.Len()) +func (p *pool) Get(ctx context.Context) (conn *ClientConn, ok bool) { + _, conn, ok = p.getHealthyConn(ctx, 0, p.Len()) + return conn, ok } -func (p *pool) getHealthyConn(ctx context.Context, cnt, retry uint64) (*ClientConn, bool) { +func (p *pool) getHealthyConn( + ctx context.Context, cnt, retry uint64, +) (idx int, conn *ClientConn, ok bool) { if p == nil || p.closing.Load() { - return nil, false + return 0, nil, false } select { case <-ctx.Done(): - return nil, false + return 0, nil, false default: } pl := p.Len() @@ -599,14 +621,13 @@ func (p *pool) getHealthyConn(ctx context.Context, cnt, retry uint64) (*ClientCo if err := p.Disconnect(); err != nil { log.Debugf("failed to disconnect gRPC IP direct connection for %s,\terr: %v", p.addr, err) } - return nil, false + return 0, nil, false } - var idx int if pl > 0 { idx = int(p.current.Add(1) % pl) } if pc := p.load(idx); pc != nil && isHealthy(ctx, pc.conn) { - return pc.conn, true + return idx, pc.conn, true } conn, err := p.dial(ctx, p.addr) if err == nil && conn != nil && isHealthy(ctx, conn) { @@ -614,15 +635,16 @@ func (p *pool) getHealthyConn(ctx context.Context, cnt, retry uint64) (*ClientCo conn: conn, addr: p.addr, }) - return conn, true + return idx, conn, true } log.Warnf("failed to find gRPC connection pool for %s.\tlen(pool): %d,\tretried: %d,\terror: %v", p.addr, pl, cnt, err) - return nil, false + return idx, nil, false } if pl > 0 { - if pc := p.load(int(p.current.Add(1) % pl)); pc != nil && isHealthy(ctx, pc.conn) { - return pc.conn, true + idx = int(p.current.Add(1) % pl) + if pc := p.load(idx); pc != nil && isHealthy(ctx, pc.conn) { + return idx, pc.conn, true } } retry-- @@ -657,23 +679,17 @@ func (p *pool) lookupIPAddr(ctx context.Context) (ips []string, err error) { ctx, cancel := context.WithTimeout(ctx, time.Millisecond*10) conn, err := net.DialContext(ctx, net.TCP.String(), addr) cancel() - if err 
!= nil { + if err != nil || conn == nil { log.Warnf("failed to initialize ping addr: %s,\terr: %s", addr, err.Error()) - if conn != nil { - err = conn.Close() - if err != nil && !errors.Is(err, grpc.ErrClientConnClosing) { - log.Warn("failed to close connection:", err) - } - } - continue + } else { + ips = append(ips, ipStr) } if conn != nil { err = conn.Close() - if err != nil && !errors.Is(err, grpc.ErrClientConnClosing) { + if err != nil && !errors.Is(err, context.Canceled) { log.Warn("failed to close connection:", err) } } - ips = append(ips, ipStr) } if len(ips) == 0 { @@ -697,10 +713,7 @@ func (p *pool) scanGRPCPort(ctx context.Context) (port uint16, err error) { return 0, ctx.Err() default: // try gRPC dialing to target port - conn, err = grpc.DialContext(ctx, - net.JoinHostPort(p.host, port), - append(p.dopts, grpc.WithBlock())...) - + conn, err = grpc.NewClient(net.JoinHostPort(p.host, port), p.dopts...) if err == nil && isHealthy(ctx, conn) && conn.Close() == nil { // if no error and healthy the port is ready for gRPC return port, nil diff --git a/internal/net/grpc/pool/pool_bench_test.go b/internal/net/grpc/pool/pool_bench_test.go index d40f76504d..b9effb3712 100644 --- a/internal/net/grpc/pool/pool_bench_test.go +++ b/internal/net/grpc/pool/pool_bench_test.go @@ -129,7 +129,7 @@ func Benchmark_ConnPool(b *testing.B) { func Benchmark_StaticDial(b *testing.B) { defer ListenAndServe(b, DefaultServerAddr)() - conn, err := grpc.DialContext(context.Background(), DefaultServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(DefaultServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { b.Error(err) } @@ -186,7 +186,7 @@ func BenchmarkParallel_ConnPool(b *testing.B) { func BenchmarkParallel_StaticDial(b *testing.B) { defer ListenAndServe(b, DefaultServerAddr)() - conn, err := grpc.DialContext(context.Background(), DefaultServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) + conn, err := grpc.NewClient(DefaultServerAddr, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { b.Error(err) } diff --git a/internal/net/grpc/server.go b/internal/net/grpc/server.go index a6777204aa..a45090b9e2 100644 --- a/internal/net/grpc/server.go +++ b/internal/net/grpc/server.go @@ -112,3 +112,67 @@ func MaxHeaderListSize(size uint32) ServerOption { func HeaderTableSize(size uint32) ServerOption { return grpc.HeaderTableSize(size) } + +// MaxConcurrentStreams returns a ServerOption that will apply a limit on the number of concurrent streams to each ServerTransport. +func MaxConcurrentStreams(n uint32) ServerOption { + return grpc.MaxConcurrentStreams(n) +} + +// NumStreamWorkers returns a ServerOption that sets the number of worker goroutines that should be used to process incoming streams. Setting this to zero +// (default) will disable workers and spawn a new goroutine for each stream. +func NumStreamWorkers(n uint32) ServerOption { + return grpc.NumStreamWorkers(n) +} + +// SharedWriteBuffer allows reusing per-connection transport write buffer. If this option is set to true every connection will release the buffer after flushing +// the data on the wire. +func SharedWriteBuffer(val bool) ServerOption { + return grpc.SharedWriteBuffer(val) +} + +// WaitForHandlers cause Stop to wait until all outstanding method handlers have exited before returning. If false, Stop will return as soon as all connections +// have closed, but method handlers may still be running. 
By default, Stop does not wait for method handlers to return. +func WaitForHandlers(val bool) ServerOption { + return grpc.WaitForHandlers(val) +} + +/* +API References https://pkg.go.dev/google.golang.org/grpc#ServerOption + +1. Already Implemented APIs +- func ConnectionTimeout(d time.Duration) ServerOption +- func Creds(c credentials.TransportCredentials) ServerOption +- func HeaderTableSize(s uint32) ServerOption +- func InitialConnWindowSize(s int32) ServerOption +- func InitialWindowSize(s int32) ServerOption +- func KeepaliveEnforcementPolicy(kep keepalive.EnforcementPolicy) ServerOption +- func KeepaliveParams(kp keepalive.ServerParameters) ServerOption +- func MaxConcurrentStreams(n uint32) ServerOption +- func MaxHeaderListSize(s uint32) ServerOption +- func MaxRecvMsgSize(m int) ServerOption +- func MaxSendMsgSize(m int) ServerOption +- func NumStreamWorkers(numServerWorkers uint32) ServerOption +- func ReadBufferSize(s int) ServerOption +- func SharedWriteBuffer(val bool) ServerOption +- func WaitForHandlers(w bool) ServerOption +- func WriteBufferSize(s int) ServerOption + +2. Unnecessary for this package APIs +- func ChainStreamInterceptor(interceptors ...StreamServerInterceptor) ServerOption +- func ChainUnaryInterceptor(interceptors ...UnaryServerInterceptor) ServerOption +- func StreamInterceptor(i StreamServerInterceptor) ServerOption +- func UnaryInterceptor(i UnaryServerInterceptor) ServerOption + +3. Experimental APIs +- func ForceServerCodec(codec encoding.Codec) ServerOption +- func ForceServerCodecV2(codecV2 encoding.CodecV2) ServerOption +- func InTapHandle(h tap.ServerInHandle) ServerOption +- func StatsHandler(h stats.Handler) ServerOption +- func UnknownServiceHandler(streamHandler StreamHandler) ServerOption + +4. Deprecated APIs +- func CustomCodec(codec Codec) ServerOption +- func MaxMsgSize(m int) ServerOption +- func RPCCompressor(cp Compressor) ServerOption +- func RPCDecompressor(dc Decompressor) ServerOption +*/ diff --git a/internal/net/grpc/status/status.go b/internal/net/grpc/status/status.go index e353b9c865..4113d872ac 100644 --- a/internal/net/grpc/status/status.go +++ b/internal/net/grpc/status/status.go @@ -18,8 +18,10 @@ package status import ( + "cmp" "context" - "os" + "slices" + "strconv" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" @@ -28,6 +30,8 @@ import ( "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/proto" "github.com/vdaas/vald/internal/net/grpc/types" + "github.com/vdaas/vald/internal/os" + "github.com/vdaas/vald/internal/strings" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/status" ) @@ -134,7 +138,7 @@ func ParseError( st, ok = FromError(err) if !ok || st == nil { if defaultCode == 0 { - defaultCode = codes.Internal + defaultCode = codes.Unknown } if len(defaultMsg) == 0 { defaultMsg = "failed to parse gRPC status from error" @@ -155,6 +159,18 @@ func ParseError( return st, msg, err } + switch st.Code() { + case codes.Aborted, + codes.Canceled, + codes.DeadlineExceeded, + codes.AlreadyExists, + codes.NotFound, + codes.OK, + codes.Unimplemented: + return st, st.Message(), st.Err() + default: + } + sst := withDetails(st, err, details...) 
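ParseError now defaults to codes.Unknown when no gRPC status can be extracted and short-circuits for benign codes before attaching details. A simplified standalone sketch of that flow; the real function also accepts a default code and message and enriches the status via withDetails:

package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func parseError(err error) (st *status.Status, msg string) {
	var ok bool
	st, ok = status.FromError(err)
	if !ok || st == nil {
		// No gRPC status attached: default to codes.Unknown rather than codes.Internal.
		st = status.New(codes.Unknown, "failed to parse gRPC status from error: "+err.Error())
		return st, st.Message()
	}
	switch st.Code() {
	case codes.OK, codes.Canceled, codes.DeadlineExceeded,
		codes.NotFound, codes.AlreadyExists, codes.Aborted, codes.Unimplemented:
		// Benign or expected codes are returned as-is, without extra details.
		return st, st.Message()
	}
	// For the remaining codes the real implementation attaches errdetails
	// messages before returning.
	return st, st.Message()
}

func main() {
	st, msg := parseError(errors.New("plain error"))
	fmt.Println(st.Code(), msg)
	st, msg = parseError(status.Error(codes.NotFound, "uuid not found"))
	fmt.Println(st.Code(), msg)
}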
if sst != nil { return sst, sst.Message(), sst.Err() @@ -236,77 +252,532 @@ func FromError(err error) (st *Status, ok bool) { } } -func withDetails(st *Status, err error, details ...any) *Status { - msgs := make([]proto.MessageV1, 0, 1+len(details)*2) +var hostname = func() (h string) { + var err error + h, err = os.Hostname() if err != nil { - msgs = append(msgs, &errdetails.ErrorInfo{ + log.Warnf("failed to fetch hostname: %s,\terror: %v", h, err) + h = "unknown-host" + } + return h +}() + +func toProtoMessage(err error, details ...any) (dmap map[string][]proto.Message) { + dmap = make(map[string][]proto.Message, len(details)+1) + if err != nil { + typeName := errdetails.ErrorInfoMessageName + dmap[typeName] = []proto.Message{&errdetails.ErrorInfo{ Reason: err.Error(), - Domain: func() (hostname string) { - var err error - hostname, err = os.Hostname() - if err != nil { - log.Warn("failed to fetch hostname:", err) - } - return hostname - }(), - }) + Domain: hostname, + }} } for _, detail := range details { if detail == nil { continue } + var ( + typeName string + msg proto.Message + ) switch v := detail.(type) { case *spb.Status: if v != nil { - msgs = append(msgs, proto.ToMessageV1(v)) + for _, d := range v.GetDetails() { + typeName = d.GetTypeUrl() + if typeName != "" { + msg = errdetails.AnyToErrorDetail(d) + if msg != nil { + dm, ok := dmap[typeName] + if ok && dm != nil { + dmap[typeName] = append(dm, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } + } + } } case spb.Status: - msgs = append(msgs, proto.ToMessageV1(&v)) + for _, d := range v.GetDetails() { + typeName = d.GetTypeUrl() + if typeName != "" { + msg = errdetails.AnyToErrorDetail(d) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } + } + } case *status.Status: if v != nil { - msgs = append(msgs, proto.ToMessageV1(&spb.Status{ - Code: v.Proto().GetCode(), - Message: v.Message(), - })) - for _, d := range v.Proto().Details { - msgs = append(msgs, proto.ToMessageV1(errdetails.AnyToErrorDetail(d))) + for _, d := range v.Proto().GetDetails() { + typeName = d.GetTypeUrl() + if typeName != "" { + msg = errdetails.AnyToErrorDetail(d) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } + } } } case status.Status: - msgs = append(msgs, proto.ToMessageV1(&spb.Status{ - Code: v.Proto().GetCode(), - Message: v.Message(), - })) - for _, d := range v.Proto().Details { - msgs = append(msgs, proto.ToMessageV1(errdetails.AnyToErrorDetail(d))) + for _, d := range v.Proto().GetDetails() { + typeName = d.GetTypeUrl() + if typeName != "" { + msg = errdetails.AnyToErrorDetail(d) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } + } } case *info.Detail: if v != nil { - msgs = append(msgs, errdetails.DebugInfoFromInfoDetail(v)) + typeName = errdetails.DebugInfoMessageName + msg = errdetails.DebugInfoFromInfoDetail(v) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } } case info.Detail: - msgs = append(msgs, errdetails.DebugInfoFromInfoDetail(&v)) + typeName = errdetails.DebugInfoMessageName + msg = errdetails.DebugInfoFromInfoDetail(&v) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + 
dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } case *types.Any: if v != nil { - msgs = append(msgs, proto.ToMessageV1(errdetails.AnyToErrorDetail(v))) + typeName = v.GetTypeUrl() + msg = errdetails.AnyToErrorDetail(v) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } } case types.Any: - msgs = append(msgs, proto.ToMessageV1(errdetails.AnyToErrorDetail(&v))) + typeName = v.GetTypeUrl() + msg = errdetails.AnyToErrorDetail(&v) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } case *proto.Message: if v != nil { - msgs = append(msgs, proto.ToMessageV1(*v)) + typeName = typeURL(*v) + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, *v) + } else { + dmap[typeName] = []proto.Message{*v} + } } case proto.Message: - msgs = append(msgs, proto.ToMessageV1(v)) + typeName = typeURL(v) + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, v) + } else { + dmap[typeName] = []proto.Message{v} + } case *proto.MessageV1: if v != nil { - msgs = append(msgs, *v) + msg = proto.ToMessageV2(*v) + typeName = typeURL(msg) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } } case proto.MessageV1: - msgs = append(msgs, v) + msg = proto.ToMessageV2(v) + typeName = typeURL(msg) + if msg != nil { + d, ok := dmap[typeName] + if ok && d != nil { + dmap[typeName] = append(d, msg) + } else { + dmap[typeName] = []proto.Message{msg} + } + } + } + } + return dmap +} + +func withDetails(st *Status, err error, details ...any) *Status { + if st != nil { + if len(st.Details()) == 0 { + ds := make([]proto.MessageV1, 0, len(details)) + for _, msgs := range toProtoMessage(err, details) { + for _, msg := range msgs { + ds = append(ds, proto.ToMessageV1(msg)) + } + } + sst, err := st.WithDetails(ds...) + if err != nil { + return st + } + return sst + } + details = append(st.Details(), details...) 
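toProtoMessage buckets every incoming detail by its message type so that withDetails can later merge duplicates per type instead of attaching them one by one. A simplified stand-in that only accepts proto messages and keys the map by type URL (the real function also handles *spb.Status, status.Status, info.Detail and the Any wrappers):

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/anypb"
)

// groupByTypeURL buckets detail messages by their type URL so messages of the
// same kind can later be merged into a single, de-duplicated detail.
func groupByTypeURL(details ...proto.Message) map[string][]proto.Message {
	dmap := make(map[string][]proto.Message, len(details))
	for _, d := range details {
		a, err := anypb.New(d)
		if err != nil {
			continue
		}
		dmap[a.GetTypeUrl()] = append(dmap[a.GetTypeUrl()], d)
	}
	return dmap
}

func main() {
	dmap := groupByTypeURL(
		&errdetails.ErrorInfo{Reason: "timeout", Domain: "gateway-01"},
		&errdetails.ErrorInfo{Reason: "timeout", Domain: "gateway-02"},
		&errdetails.RequestInfo{RequestId: "req-1"},
	)
	for url, msgs := range dmap {
		fmt.Println(url, len(msgs))
	}
}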
} + dmap := toProtoMessage(err, details) + + msgs := make([]proto.MessageV1, 0, len(dmap)) + visited := make(map[string]bool, len(dmap)) + for typeName, ds := range dmap { + switch typeName { + case errdetails.DebugInfoMessageName: + m := new(errdetails.DebugInfo) + for _, msg := range ds { + d, ok := msg.(*errdetails.DebugInfo) + if ok && d != nil { + key := errdetails.DebugInfoMessageName + d.GetDetail() + strings.Join(d.GetStackEntries(), ",") + if !visited[key] { + visited[key] = true + if m.GetDetail() == "" { + m.Detail = d.GetDetail() + } else if m.GetDetail() != d.GetDetail() && !strings.Contains(m.GetDetail(), d.GetDetail()) { + m.Detail += "\t" + d.GetDetail() + } + if len(m.GetStackEntries()) < len(d.GetStackEntries()) { + m.StackEntries = d.GetStackEntries() + } + } + } + } + m.Detail = removeDuplicatesFromTSVLine(m.GetDetail()) + msgs = append(msgs, m) + case errdetails.ErrorInfoMessageName: + m := new(errdetails.ErrorInfo) + for _, msg := range ds { + e, ok := msg.(*errdetails.ErrorInfo) + if ok && e != nil && !visited[e.GetReason()] { + visited[e.GetReason()] = true + key := errdetails.ErrorInfoMessageName + e.GetDomain() + e.GetReason() + if !visited[key] { + visited[key] = true + if m.GetDomain() == "" { + m.Domain = e.GetDomain() + } else if m.GetDomain() != e.GetDomain() && !strings.Contains(m.GetDomain(), e.GetDomain()) { + m.Domain += "\t" + e.GetDomain() + } + if m.GetReason() == "" { + m.Reason += e.GetReason() + } else if m.GetReason() != e.GetReason() && !strings.Contains(m.GetReason(), e.GetReason()) { + m.Reason += "\t" + e.GetReason() + } + if e.GetMetadata() != nil { + if m.GetMetadata() == nil { + m.Metadata = e.GetMetadata() + } else { + m.Metadata = appendM(m.GetMetadata(), e.GetMetadata()) + } + } + } + } + } + m.Reason = removeDuplicatesFromTSVLine(m.GetReason()) + m.Domain = removeDuplicatesFromTSVLine(m.GetDomain()) + msgs = append(msgs, m) + case errdetails.BadRequestMessageName: + m := new(errdetails.BadRequest) + for _, msg := range ds { + b, ok := msg.(*errdetails.BadRequest) + if ok && b != nil && b.GetFieldViolations() != nil && !visited[b.String()] { + visited[b.String()] = true + if m.GetFieldViolations() == nil { + m = b + } else { + m.FieldViolations = append(m.GetFieldViolations(), b.GetFieldViolations()...) 
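Merged ErrorInfo reasons and domains are joined with tabs and then collapsed by removeDuplicatesFromTSVLine, which is defined further down in this file. A usage sketch of that helper:

package main

import (
	"fmt"
	"strings"
)

// removeDuplicatesFromTSVLine collapses repeated tab-separated fields, which
// is how merged ErrorInfo reasons and domains are normalised.
func removeDuplicatesFromTSVLine(line string) string {
	fields := strings.Split(line, "\t")
	seen := make(map[string]bool, len(fields))
	out := make([]string, 0, len(fields))
	for _, f := range fields {
		if !seen[f] {
			seen[f] = true
			out = append(out, f)
		}
	}
	return strings.Join(out, "\t")
}

func main() {
	reason := "connection refused\ttimeout\tconnection refused"
	fmt.Println(removeDuplicatesFromTSVLine(reason)) // connection refused	timeout
}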
+ } + } + } + slices.SortFunc(m.FieldViolations, func(left, right *errdetails.BadRequestFieldViolation) int { + return cmp.Compare(left.GetField(), right.GetField()) + }) + m.FieldViolations = slices.CompactFunc(m.GetFieldViolations(), func(left, right *errdetails.BadRequestFieldViolation) bool { + return left.GetField() == right.GetField() + }) + msgs = append(msgs, m) + case errdetails.BadRequestFieldViolationMessageName: + m := new(errdetails.BadRequestFieldViolation) + for _, msg := range ds { + b, ok := msg.(*errdetails.BadRequestFieldViolation) + if ok && b != nil { + key := errdetails.BadRequestFieldViolationMessageName + b.GetField() + b.GetDescription() + if !visited[key] { + visited[key] = true + if m.GetField() == "" { + m.Field = b.GetField() + } else if m.GetField() != b.GetField() && !strings.Contains(m.GetField(), b.GetField()) { + m.Field += "\t" + b.GetField() + } + if m.GetDescription() == "" { + m.Description = b.GetDescription() + } else if m.GetDescription() != b.GetDescription() && !strings.Contains(m.GetDescription(), b.GetDescription()) { + m.Description += "\t" + b.GetDescription() + } + } + } + } + msgs = append(msgs, m) + case errdetails.LocalizedMessageMessageName: + m := new(errdetails.LocalizedMessage) + for _, msg := range ds { + l, ok := msg.(*errdetails.LocalizedMessage) + if ok && l != nil { + key := errdetails.LocalizedMessageMessageName + l.GetLocale() + l.GetMessage() + if !visited[key] { + visited[key] = true + if m.GetLocale() == "" { + m.Locale = l.GetLocale() + } else if m.GetLocale() != l.GetLocale() && !strings.Contains(m.GetLocale(), l.GetLocale()) { + m.Locale += "\t" + l.GetLocale() + } + if m.GetMessage() == "" { + m.Message = l.GetMessage() + } else if m.GetMessage() != l.GetMessage() && !strings.Contains(m.GetMessage(), l.GetMessage()) { + m.Message += "\t" + l.GetMessage() + } + } + } + } + msgs = append(msgs, m) + case errdetails.PreconditionFailureMessageName: + m := new(errdetails.PreconditionFailure) + for _, msg := range ds { + p, ok := msg.(*errdetails.PreconditionFailure) + if ok && p != nil && p.GetViolations() != nil && !visited[p.String()] { + visited[p.String()] = true + if m.GetViolations() == nil { + m = p + } else { + m.Violations = append(m.GetViolations(), p.GetViolations()...) 
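Repeated BadRequest field violations are sorted by field and compacted, which is what the slices.SortFunc/CompactFunc pair above does. The same normalisation with the public genproto types (the internal errdetails package merely aliases them):

package main

import (
	"cmp"
	"fmt"
	"slices"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
)

func main() {
	m := &errdetails.BadRequest{FieldViolations: []*errdetails.BadRequest_FieldViolation{
		{Field: "vector", Description: "dimension mismatch"},
		{Field: "id", Description: "empty uuid"},
		{Field: "vector", Description: "dimension mismatch"},
	}}
	// Sort by field name, then drop adjacent duplicates so the merged
	// BadRequest detail carries each violated field exactly once.
	slices.SortFunc(m.FieldViolations, func(l, r *errdetails.BadRequest_FieldViolation) int {
		return cmp.Compare(l.GetField(), r.GetField())
	})
	m.FieldViolations = slices.CompactFunc(m.FieldViolations, func(l, r *errdetails.BadRequest_FieldViolation) bool {
		return l.GetField() == r.GetField()
	})
	for _, v := range m.GetFieldViolations() {
		fmt.Println(v.GetField(), v.GetDescription())
	}
}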
+ } + } + } + slices.SortFunc(m.Violations, func(left, right *errdetails.PreconditionFailureViolation) int { + return cmp.Compare(left.GetType(), right.GetType()) + }) + m.Violations = slices.CompactFunc(m.GetViolations(), func(left, right *errdetails.PreconditionFailureViolation) bool { + return left.GetType() == right.GetType() + }) + msgs = append(msgs, m) + case errdetails.PreconditionFailureViolationMessageName: + m := new(errdetails.PreconditionFailureViolation) + for _, msg := range ds { + p, ok := msg.(*errdetails.PreconditionFailureViolation) + if ok && p != nil { + key := errdetails.PreconditionFailureViolationMessageName + p.GetType() + p.GetSubject() + p.GetDescription() + if !visited[key] { + visited[key] = true + if m.GetType() == "" { + m.Type = p.GetType() + } else if m.GetType() != p.GetType() && !strings.Contains(m.GetType(), p.GetType()) { + m.Type += "\t" + p.GetType() + } + if m.GetSubject() == "" { + m.Subject = p.GetSubject() + } else if m.GetSubject() != p.GetSubject() && !strings.Contains(m.GetSubject(), p.GetSubject()) { + m.Subject += "\t" + p.GetSubject() + } + if m.GetDescription() == "" { + m.Description = p.GetDescription() + } else if m.GetDescription() != p.GetDescription() && !strings.Contains(m.GetDescription(), p.GetDescription()) { + m.Description += "\t" + p.GetDescription() + } + } + } + } + msgs = append(msgs, m) + case errdetails.HelpMessageName: + m := new(errdetails.Help) + for _, msg := range ds { + h, ok := msg.(*errdetails.Help) + if ok && h != nil && h.GetLinks() != nil && !visited[h.String()] { + visited[h.String()] = true + if m.GetLinks() == nil { + m = h + } else { + m.Links = append(m.GetLinks(), h.GetLinks()...) + } + } + } + slices.SortFunc(m.Links, func(left, right *errdetails.HelpLink) int { + return cmp.Compare(left.GetUrl(), right.GetUrl()) + }) + m.Links = slices.CompactFunc(m.GetLinks(), func(left, right *errdetails.HelpLink) bool { + return left.GetUrl() == right.GetUrl() + }) + msgs = append(msgs, m) + case errdetails.HelpLinkMessageName: + m := new(errdetails.HelpLink) + for _, msg := range ds { + h, ok := msg.(*errdetails.HelpLink) + if ok && h != nil { + key := errdetails.HelpLinkMessageName + h.GetUrl() + h.GetDescription() + if !visited[key] { + visited[key] = true + if m.GetUrl() == "" { + m.Url = h.GetUrl() + } else if m.GetUrl() != h.GetUrl() && !strings.Contains(m.GetUrl(), h.GetUrl()) { + m.Url += "\t" + h.GetUrl() + } + if m.GetDescription() == "" { + m.Description = h.GetDescription() + } else if m.GetDescription() != h.GetDescription() && !strings.Contains(m.GetDescription(), h.GetDescription()) { + m.Description += "\t" + h.GetDescription() + } + } + } + } + msgs = append(msgs, m) + case errdetails.QuotaFailureMessageName: + m := new(errdetails.QuotaFailure) + for _, msg := range ds { + q, ok := msg.(*errdetails.QuotaFailure) + if ok && q != nil && q.GetViolations() != nil && !visited[q.String()] { + visited[q.String()] = true + if m.GetViolations() == nil { + m = q + } else { + m.Violations = append(m.GetViolations(), q.GetViolations()...) 
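Once the per-type messages are merged, they are attached to a fresh status via WithDetails. A minimal sketch of that final step with plain grpc-go and genproto types; the subject and request id are made up:

package main

import (
	"fmt"

	"google.golang.org/genproto/googleapis/rpc/errdetails"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func main() {
	st := status.New(codes.ResourceExhausted, "quota exceeded")
	// WithDetails returns a new *status.Status carrying the merged details;
	// on error the original status is kept unchanged.
	sst, err := st.WithDetails(
		&errdetails.QuotaFailure{Violations: []*errdetails.QuotaFailure_Violation{
			{Subject: "vald-agent-0", Description: "memory limit reached"},
		}},
		&errdetails.RequestInfo{RequestId: "req-42"},
	)
	if err != nil {
		sst = st
	}
	fmt.Println(sst.Code(), len(sst.Details())) // ResourceExhausted 2
}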
+ } + } + } + slices.SortFunc(m.Violations, func(left, right *errdetails.QuotaFailureViolation) int { + return cmp.Compare(left.GetSubject(), right.GetSubject()) + }) + m.Violations = slices.CompactFunc(m.GetViolations(), func(left, right *errdetails.QuotaFailureViolation) bool { + return left.GetSubject() == right.GetSubject() + }) + msgs = append(msgs, m) + case errdetails.QuotaFailureViolationMessageName: + m := new(errdetails.QuotaFailureViolation) + for _, msg := range ds { + q, ok := msg.(*errdetails.QuotaFailureViolation) + if ok && q != nil { + key := errdetails.QuotaFailureViolationMessageName + q.GetSubject() + q.GetDescription() + if !visited[key] { + visited[key] = true + if m.GetSubject() == "" { + m.Subject = q.GetSubject() + } else if m.GetSubject() != q.GetSubject() && !strings.Contains(m.GetSubject(), q.GetSubject()) { + m.Subject += "\t" + q.GetSubject() + } + if m.GetDescription() == "" { + m.Description = q.GetDescription() + } else if m.GetDescription() != q.GetDescription() && !strings.Contains(m.GetDescription(), q.GetDescription()) { + m.Description += "\t" + q.GetDescription() + } + } + } + } + msgs = append(msgs, m) + case errdetails.RequestInfoMessageName: + m := new(errdetails.RequestInfo) + for _, msg := range ds { + r, ok := msg.(*errdetails.RequestInfo) + if ok && r != nil { + key := errdetails.RequestInfoMessageName + r.GetRequestId() + r.GetServingData() + if !visited[key] { + visited[key] = true + if m.GetRequestId() == "" { + m.RequestId = r.GetRequestId() + } else if m.GetRequestId() != r.GetRequestId() && !strings.Contains(m.GetRequestId(), r.GetRequestId()) { + m.RequestId += "\t" + r.GetRequestId() + } + if m.GetServingData() == "" { + m.ServingData = r.GetServingData() + } else if m.GetServingData() != r.GetServingData() && !strings.Contains(m.GetServingData(), r.GetServingData()) { + m.ServingData += "\t" + r.GetServingData() + } + } + } + } + msgs = append(msgs, m) + case errdetails.ResourceInfoMessageName: + m := new(errdetails.ResourceInfo) + for _, msg := range ds { + r, ok := msg.(*errdetails.ResourceInfo) + if ok && r != nil { + key := errdetails.ResourceInfoMessageName + r.GetResourceType() + r.GetResourceName() + r.GetDescription() + if !visited[key] { + visited[key] = true + if m.GetResourceType() == "" { + m.ResourceType = r.GetResourceType() + } else if m.GetResourceType() != r.GetResourceType() && len(m.GetResourceType()) < len(r.GetResourceType()) { + m.ResourceType += r.GetResourceType() + } + if m.GetResourceName() == "" { + m.ResourceName = r.GetResourceName() + } else if m.GetResourceName() != r.GetResourceName() && !strings.Contains(m.GetResourceName(), r.GetResourceName()) { + m.ResourceName += "\t" + r.GetResourceName() + } + if m.GetDescription() == "" { + m.Description = r.GetDescription() + } else if m.GetDescription() != r.GetDescription() && !strings.Contains(m.GetDescription(), r.GetDescription()) { + m.Description += "\t" + r.GetDescription() + } + } + } + } + msgs = append(msgs, m) + case errdetails.RetryInfoMessageName: + m := new(errdetails.RetryInfo) + for _, msg := range ds { + r, ok := msg.(*errdetails.RetryInfo) + if ok && r != nil { + key := errdetails.RetryInfoMessageName + strconv.FormatInt(r.GetRetryDelay().GetSeconds(), 10) + strconv.FormatInt(int64(r.GetRetryDelay().GetNanos()), 10) + if !visited[key] { + visited[key] = true + if m.GetRetryDelay() == nil || r.GetRetryDelay().GetSeconds() < m.GetRetryDelay().GetSeconds() { + m.RetryDelay = r.GetRetryDelay() + } + } + } + } + msgs = append(msgs, m) + } + } if st 
== nil { if err != nil { st = New(codes.Unknown, err.Error()) @@ -314,21 +785,80 @@ func withDetails(st *Status, err error, details ...any) *Status { st = New(codes.Unknown, "") } } - - if len(msgs) != 0 { - sst, err := st.WithDetails(msgs...) - if err == nil && sst != nil { + if msgs != nil { + sst, err := status.New(st.Code(), st.Message()).WithDetails(msgs...) + if err == nil { st = sst - } else { - log.Warn("failed to set error details:", err) } } - Log(st.Code(), st.Err()) - return st } +func typeURL(msg proto.Message) string { + switch msg.(type) { + case *errdetails.DebugInfo: + return errdetails.DebugInfoMessageName + case *errdetails.ErrorInfo: + return errdetails.ErrorInfoMessageName + case *errdetails.BadRequest: + return errdetails.BadRequestMessageName + case *errdetails.BadRequestFieldViolation: + return errdetails.BadRequestFieldViolationMessageName + case *errdetails.LocalizedMessage: + return errdetails.LocalizedMessageMessageName + case *errdetails.PreconditionFailure: + return errdetails.PreconditionFailureMessageName + case *errdetails.PreconditionFailureViolation: + return errdetails.PreconditionFailureViolationMessageName + case *errdetails.Help: + return errdetails.HelpMessageName + case *errdetails.HelpLink: + return errdetails.HelpLinkMessageName + case *errdetails.QuotaFailure: + return errdetails.QuotaFailureMessageName + case *errdetails.QuotaFailureViolation: + return errdetails.QuotaFailureViolationMessageName + case *errdetails.RequestInfo: + return errdetails.RequestInfoMessageName + case *errdetails.ResourceInfo: + return errdetails.ResourceInfoMessageName + case *errdetails.RetryInfo: + return errdetails.RetryInfoMessageName + } + return "unknown" +} + +func appendM[K comparable](maps ...map[K]string) (result map[K]string) { + if len(maps) == 0 { + return nil + } + result = maps[0] + for _, m := range maps[1:] { + for k, v := range m { + ev, ok := result[k] + if ok && v != ev && !strings.Contains(v, ev) { + v += "\t" + ev + } + result[k] = v + } + } + return result +} + +func removeDuplicatesFromTSVLine(line string) string { + fields := strings.Split(line, "\t") + uniqueFields := make(map[string]bool) + result := make([]string, 0, len(fields)) + for _, field := range fields { + if !uniqueFields[field] { + uniqueFields[field] = true + result = append(result, field) + } + } + return strings.Join(result, "\t") +} + func Log(code codes.Code, err error) { if err != nil { switch code { diff --git a/internal/net/grpc/status/status_test.go b/internal/net/grpc/status/status_test.go index 56d187022a..6b2f1a552d 100644 --- a/internal/net/grpc/status/status_test.go +++ b/internal/net/grpc/status/status_test.go @@ -2151,6 +2151,94 @@ func TestParseError(t *testing.T) { // } // } // +// func Test_toProtoMessage(t *testing.T) { +// type args struct { +// err error +// details []any +// } +// type want struct { +// wantDmap map[string][]proto.Message +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, map[string][]proto.Message) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotDmap map[string][]proto.Message) error { +// if !reflect.DeepEqual(gotDmap, w.wantDmap) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDmap, w.wantDmap) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// err:nil, +// details:nil, +// }, +// want: want{}, +// checkFunc: 
defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// err:nil, +// details:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotDmap := toProtoMessage(test.args.err, test.args.details...) +// if err := checkFunc(test.want, gotDmap); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_withDetails(t *testing.T) { // type args struct { // st *Status @@ -2242,6 +2330,261 @@ func TestParseError(t *testing.T) { // } // } // +// func Test_typeURL(t *testing.T) { +// type args struct { +// msg proto.Message +// } +// type want struct { +// want string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got string) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// msg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// msg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := typeURL(test.args.msg) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_appendM(t *testing.T) { +// type args struct { +// maps []map[K]string +// } +// type want struct { +// wantResult map[K]string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, map[K]string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotResult map[K]string) error { +// if !reflect.DeepEqual(gotResult, w.wantResult) { +// return 
errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotResult, w.wantResult) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// maps:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// maps:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotResult := appendM(test.args.maps...) +// if err := checkFunc(test.want, gotResult); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_removeDuplicatesFromTSVLine(t *testing.T) { +// type args struct { +// line string +// } +// type want struct { +// want string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got string) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// line:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// line:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := removeDuplicatesFromTSVLine(test.args.line) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestLog(t *testing.T) { // type args struct { // code codes.Code diff --git a/internal/net/http/json/json.go b/internal/net/http/json/json.go index a821710525..11b156b567 100644 --- a/internal/net/http/json/json.go +++ b/internal/net/http/json/json.go @@ -20,7 +20,6 @@ import ( "bytes" "context" "net/http" - "os" "github.com/vdaas/vald/internal/encoding/json" 
"github.com/vdaas/vald/internal/errors" @@ -28,6 +27,7 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/http/dump" "github.com/vdaas/vald/internal/net/http/rest" + "github.com/vdaas/vald/internal/os" ) // RFC7807Error represents RFC 7807 error. diff --git a/internal/net/http/json/json_test.go b/internal/net/http/json/json_test.go index 1df3ec916c..493101206a 100644 --- a/internal/net/http/json/json_test.go +++ b/internal/net/http/json/json_test.go @@ -377,7 +377,7 @@ func TestErrorHandler(t *testing.T) { } if got, want := w.Code, http.StatusInternalServerError; got != want { - return errors.Errorf("reso code not equals. want: %v, got: %v", http.StatusInternalServerError, got) + return errors.Errorf("response code not equals. want: %v, got: %v", http.StatusInternalServerError, got) } return nil }, @@ -666,7 +666,7 @@ func TestRequest(t *testing.T) { ctx context.Context method string url string - payloyd any + payload any data any } type want struct { @@ -694,7 +694,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "@", url: "/", - payloyd: nil, + payload: nil, data: nil, }, want: want{ @@ -709,7 +709,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: 1 + 3i, + payload: 1 + 3i, data: new(any), }, checkFunc: func(w want, err error) error { @@ -730,7 +730,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: "/", - payloyd: "1", + payload: "1", data: new(any), }, want: want{ @@ -753,7 +753,7 @@ func TestRequest(t *testing.T) { ctx: context.Background(), method: "POST", url: srv.URL, - payloyd: "1", + payload: "1", data: &got, }, want: want{ @@ -791,7 +791,7 @@ func TestRequest(t *testing.T) { checkFunc = defaultCheckFunc } - err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payloyd, test.args.data) + err := Request(test.args.ctx, test.args.method, test.args.url, test.args.payload, test.args.data) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) } diff --git a/internal/net/net.go b/internal/net/net.go index 2c98fb9e4d..4d2a8647be 100644 --- a/internal/net/net.go +++ b/internal/net/net.go @@ -34,6 +34,9 @@ import ( ) type ( + // Addr is an alias of net.Addr. + Addr = net.Addr + // Conn is an alias of net.Conn. Conn = net.Conn @@ -46,6 +49,15 @@ type ( // Resolver is an alias of net.Resolver. Resolver = net.Resolver + // UDPConn is an alias of net.UDPConn. + UDPConn = net.UDPConn + + // TCPListener is an alias of net.TCPListener. + TCPListener = net.TCPListener + + // UnixListener is an alias of net.UnixListener. + UnixListener = net.UnixListener + // NetworkType represents a network type such as TCP, TCP6, etc. NetworkType uint ) @@ -84,6 +96,9 @@ var ( // NetworkTypeFromString returns the corresponding network type from string. func NetworkTypeFromString(str string) NetworkType { + if str == "" { + return Unknown + } switch strings.ToLower(str) { case UNIX.String(): return UNIX @@ -151,13 +166,31 @@ func IsLocal(host string) bool { host == localIPv6 } +// IsUDP returns if the network type is the udp or udp4 or udp6. +func IsUDP(network string) bool { + rip := NetworkTypeFromString(network) + return rip == UDP || + rip == UDP4 || + rip == UDP6 +} + +// IsTCP returns if the network type is the tcp or tcp4 or tcp6. 
+func IsTCP(network string) bool { + rip := NetworkTypeFromString(network) + return rip == TCP || + rip == TCP4 || + rip == TCP6 +} + // Parse parses the hostname, IPv4 or IPv6 address and return the hostname/IP, port number, // whether the address is local IP and IPv4 or IPv6, and any parsing error occurred. // The address should contains the port number, otherwise an error will return. func Parse(addr string) (host string, port uint16, isLocal, isIPv4, isIPv6 bool, err error) { host, port, err = SplitHostPort(addr) if err != nil { - log.Warnf("failed to parse addr %s\terror: %v", addr, err) + if !errors.Is(err, errors.Errorf("address %s: missing port in address", addr)) { + log.Warnf("failed to parse addr %s\terror: %v", addr, err) + } host = addr } diff --git a/internal/net/net_test.go b/internal/net/net_test.go index 38c7688c2b..28ab0ee575 100644 --- a/internal/net/net_test.go +++ b/internal/net/net_test.go @@ -262,7 +262,7 @@ func TestParse(t *testing.T) { return errors.Errorf("host got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotHost, w.wantHost) } if !reflect.DeepEqual(gotPort, w.wantPort) { - return errors.Errorf("port got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotPort, w.wantPort) + return errors.Errorf("port got: \"%d\",\n\t\t\t\twant: \"%d\"", gotPort, w.wantPort) } if !reflect.DeepEqual(gotIsLocal, w.isLocal) { return errors.Errorf("isLocal got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIsLocal, w.isLocal) diff --git a/internal/observability/exporter/otlp/otlp.go b/internal/observability/exporter/otlp/otlp.go index b2f7c74553..cd81c0ccb4 100644 --- a/internal/observability/exporter/otlp/otlp.go +++ b/internal/observability/exporter/otlp/otlp.go @@ -31,7 +31,7 @@ import ( "go.opentelemetry.io/otel/sdk/metric" "go.opentelemetry.io/otel/sdk/resource" "go.opentelemetry.io/otel/sdk/trace" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) // Metrics and Trace attribute keys. 
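A minimal usage sketch of the internal/net helpers introduced in the hunks above (IsUDP, IsTCP, and the empty-string guard in NetworkTypeFromString); illustrative only, assuming the package is imported from the repository path shown in the diff:

	package main

	import (
		"fmt"

		// helpers added in the internal/net hunks above
		vnet "github.com/vdaas/vald/internal/net"
	)

	func main() {
		// IsTCP / IsUDP match tcp, tcp4, tcp6 and udp, udp4, udp6 respectively.
		fmt.Println(vnet.IsTCP("tcp4")) // true
		fmt.Println(vnet.IsUDP("tcp"))  // false

		// NetworkTypeFromString now short-circuits on an empty string and
		// returns Unknown instead of evaluating the switch.
		fmt.Println(vnet.NetworkTypeFromString("") == vnet.Unknown) // true
	}
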
diff --git a/internal/observability/metrics/mem/mem.go b/internal/observability/metrics/mem/mem.go index e911e71807..c042c4565c 100644 --- a/internal/observability/metrics/mem/mem.go +++ b/internal/observability/metrics/mem/mem.go @@ -19,11 +19,11 @@ import ( "os" "runtime" "strconv" - "strings" "time" "github.com/vdaas/vald/internal/conv" "github.com/vdaas/vald/internal/observability/metrics" + "github.com/vdaas/vald/internal/strings" api "go.opentelemetry.io/otel/metric" view "go.opentelemetry.io/otel/sdk/metric" ) diff --git a/internal/observability/trace/status.go b/internal/observability/trace/status.go index 292d790a25..021a10ed3c 100644 --- a/internal/observability/trace/status.go +++ b/internal/observability/trace/status.go @@ -21,7 +21,7 @@ import ( "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/observability/attribute" ocodes "go.opentelemetry.io/otel/codes" - semconv "go.opentelemetry.io/otel/semconv/v1.24.0" + semconv "go.opentelemetry.io/otel/semconv/v1.26.0" ) type Attributes = []attribute.KeyValue diff --git a/internal/params/params_test.go b/internal/params/params_test.go index 55a509ac0a..0df3c71ac4 100644 --- a/internal/params/params_test.go +++ b/internal/params/params_test.go @@ -285,7 +285,7 @@ func Test_parser_Parse(t *testing.T) { beforeFunc: func(t *testing.T) { t.Helper() os.Args = []string{ - "test", "--path=config.yml", + "test", "--path=config.yaml", } }, afterFunc: func(t *testing.T) { @@ -296,7 +296,7 @@ func Test_parser_Parse(t *testing.T) { want1: true, err: &os.PathError{ Op: "stat", - Path: "config.yml", + Path: "config.yaml", Err: syscall.Errno(0x2), }, }, diff --git a/internal/safety/safety.go b/internal/safety/safety.go index 27b6becf88..e6f6f382d8 100644 --- a/internal/safety/safety.go +++ b/internal/safety/safety.go @@ -19,7 +19,9 @@ package safety import ( "runtime" + "runtime/debug" + "github.com/vdaas/vald/internal/conv" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" @@ -35,16 +37,19 @@ func RecoverWithoutPanicFunc(fn func() error) func() error { func recoverFn(fn func() error, withPanic bool) func() error { return func() (err error) { + if fn == nil { + return nil + } defer func() { if r := recover(); r != nil { - infoStr := info.Get().String() + ds := debug.Stack() + infoStr := info.Get().String() + "\r\n" + conv.Btoa(ds) log.Warnf("function %#v panic recovered: %#v\ninfo:\n%s", fn, r, infoStr) switch x := r.(type) { case runtime.Error: err = errors.ErrRuntimeError(err, x) if withPanic { log.Errorf("recovered but this thread is going to panic: the reason is runtimer.Error\nerror:\t%v\ninfo:\n%s\nrecovered:\t%#v", err, infoStr, r) - panic(err) } case *string: diff --git a/internal/servers/server/option.go b/internal/servers/server/option.go index 8d6ae4f972..6527440ea4 100644 --- a/internal/servers/server/option.go +++ b/internal/servers/server/option.go @@ -608,19 +608,55 @@ func WithGRPCConnectionTimeout(to string) Option { } } -func WithGRPCMaxHeaderListSize(size int) Option { +func WithGRPCMaxHeaderListSize(size uint32) Option { return func(s *server) error { if size > 0 { - s.grpc.opts = append(s.grpc.opts, grpc.MaxHeaderListSize(uint32(size))) + s.grpc.opts = append(s.grpc.opts, grpc.MaxHeaderListSize(size)) } return nil } } -func WithGRPCHeaderTableSize(size int) Option { +func WithGRPCHeaderTableSize(size uint32) Option { return func(s *server) error { if size > 0 { - s.grpc.opts = append(s.grpc.opts, grpc.HeaderTableSize(uint32(size))) + 
s.grpc.opts = append(s.grpc.opts, grpc.HeaderTableSize(size)) + } + return nil + } +} + +func WithGRPCMaxConcurrentStreams(size uint32) Option { + return func(s *server) error { + if size > 0 { + s.grpc.opts = append(s.grpc.opts, grpc.MaxConcurrentStreams(size)) + } + return nil + } +} + +func WithGRPCNumStreamWorkers(size uint32) Option { + return func(s *server) error { + if size > 0 { + s.grpc.opts = append(s.grpc.opts, grpc.NumStreamWorkers(size)) + } + return nil + } +} + +func WithGRPCSharedWriteBuffer(enable bool) Option { + return func(s *server) error { + if enable { + s.grpc.opts = append(s.grpc.opts, grpc.SharedWriteBuffer(enable)) + } + return nil + } +} + +func WithGRPCWaitForHandlers(wait bool) Option { + return func(s *server) error { + if wait { + s.grpc.opts = append(s.grpc.opts, grpc.WaitForHandlers(wait)) } return nil } diff --git a/internal/servers/server/option_test.go b/internal/servers/server/option_test.go index 4fe22f01f1..f6a7bfc550 100644 --- a/internal/servers/server/option_test.go +++ b/internal/servers/server/option_test.go @@ -2103,7 +2103,7 @@ func TestWithGRPCConnectionTimeout(t *testing.T) { func TestWithGRPCMaxHeaderListSize(t *testing.T) { type test struct { name string - size int + size uint32 checkFunc func(opt Option) error } @@ -2138,24 +2138,6 @@ func TestWithGRPCMaxHeaderListSize(t *testing.T) { return nil }, }, - - { - name: "not set when size is less than 0", - size: -1, - checkFunc: func(opt Option) error { - gopts := []grpc.ServerOption{ - grpc.ConnectionTimeout(10 * time.Second), - } - got := new(server) - got.grpc.opts = gopts - opt(got) - - if !reflect.DeepEqual(got.grpc.opts, gopts) { - return errors.New("invalid param was set") - } - return nil - }, - }, } for _, tt := range tests { @@ -2171,7 +2153,7 @@ func TestWithGRPCMaxHeaderListSize(t *testing.T) { func TestWithGRPCHeaderTableSize(t *testing.T) { type test struct { name string - size int + size uint32 checkFunc func(opt Option) error } @@ -2206,24 +2188,6 @@ func TestWithGRPCHeaderTableSize(t *testing.T) { return nil }, }, - - { - name: "not set when size is less than 0", - size: -1, - checkFunc: func(opt Option) error { - gopts := []grpc.ServerOption{ - grpc.ConnectionTimeout(10 * time.Second), - } - got := new(server) - got.grpc.opts = gopts - opt(got) - - if !reflect.DeepEqual(got.grpc.opts, gopts) { - return errors.New("invalid param was set") - } - return nil - }, - }, } for _, tt := range tests { @@ -2826,6 +2790,771 @@ func TestDefaultHealthServerOption(t *testing.T) { // } // } // +// func TestWithHTTP2Enabled(t *testing.T) { +// type args struct { +// enabled bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// enabled:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// enabled:false, +// }, +// want: want{}, 
+// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithHTTP2Enabled(test.args.enabled) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithHandlerLimit(t *testing.T) { +// type args struct { +// size int +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithHandlerLimit(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithPermitProhibitedCipherSuites(t *testing.T) { +// type args struct { +// perm bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// perm:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// 
perm:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithPermitProhibitedCipherSuites(test.args.perm) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxUploadBufferPerConnection(t *testing.T) { +// type args struct { +// size int32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxUploadBufferPerConnection(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxUploadBufferPerStream(t *testing.T) { +// type args struct { +// size int32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() 
test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxUploadBufferPerStream(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxConcurrentStreams(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxConcurrentStreams(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxDecoderHeaderTableSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// 
*/ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxDecoderHeaderTableSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxEncoderHeaderTableSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxEncoderHeaderTableSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxReadFrameSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t 
*testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxReadFrameSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestWithGRPCKeepaliveMinTime(t *testing.T) { // type args struct { // min string diff --git a/internal/servers/server/server.go b/internal/servers/server/server.go index 09ef6af9d7..69477a695f 100644 --- a/internal/servers/server/server.go +++ b/internal/servers/server/server.go @@ -34,6 +34,7 @@ import ( "github.com/vdaas/vald/internal/net/control" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/credentials" + "github.com/vdaas/vald/internal/net/grpc/health" "github.com/vdaas/vald/internal/net/grpc/keepalive" glog "github.com/vdaas/vald/internal/net/grpc/logger" "github.com/vdaas/vald/internal/safety" @@ -243,8 +244,8 @@ func New(opts ...Option) (Server, error) { for _, reg := range srv.grpc.regs { reg(srv.grpc.srv) } + health.Register(srv.grpc.srv) } - if srv.lc == nil { srv.ctrl = control.New(srv.sockFlg, int(keepAlive)) srv.lc = &net.ListenConfig{ @@ -287,12 +288,13 @@ func (s *server) ListenAndServe(ctx context.Context, ech chan<- error) (err erro } } - l, err := s.lc.Listen(ctx, func() string { + network := func() string { if s.network == 0 || s.network == net.Unknown || strings.EqualFold(s.network.String(), net.Unknown.String()) { return net.TCP.String() } return s.network.String() - }(), func() string { + }() + addr := func() string { if s.network == net.UNIX { if s.socketPath == "" { sockFile := strings.Join([]string{s.name, strconv.Itoa(os.Getpid()), "sock"}, ".") @@ -301,17 +303,47 @@ func (s *server) ListenAndServe(ctx context.Context, ech chan<- error) (err erro return s.socketPath } return net.JoinHostPort(s.host, s.port) - }()) - if err != nil { - log.Errorf("failed to listen socket %v", err) - return err - } - - if s.tcfg != nil && - (len(s.tcfg.Certificates) != 0 || - s.tcfg.GetCertificate != nil || - s.tcfg.GetConfigForClient != nil) { - l = tls.NewListener(l, s.tcfg) + }() + var l net.Listener + if s.tcfg != nil && net.IsUDP(network) { + log.Error("QUIC protocol is not supported yet") + return errors.ErrUnsupportedClientMethod + } else { + if net.IsUDP(network) { + network = net.TCP.String() + } + l, err = s.lc.Listen(ctx, network, addr) + if err != nil { + log.Errorf("failed to listen socket %v", err) + return err + } + var file *os.File + switch lt := l.(type) { + case *net.TCPListener: + file, err = lt.File() + if err != nil { + log.Errorf("failed to listen tcp socket %v", err) + return err + } + case *net.UnixListener: + file, err = lt.File() + if err != nil { + log.Errorf("failed to listen unix socket %v", err) + return err + } + } + if file != 
nil { + err = syscall.SetNonblock(int(file.Fd()), true) + if err != nil { + return err + } + } + if s.tcfg != nil && + (len(s.tcfg.Certificates) != 0 || + s.tcfg.GetCertificate != nil || + s.tcfg.GetConfigForClient != nil) { + l = tls.NewListener(l, s.tcfg) + } } if l == nil { @@ -426,9 +458,18 @@ func (s *server) Shutdown(ctx context.Context) (rerr error) { if err != nil && err != http.ErrServerClosed && err != grpc.ErrServerStopped { rerr = errors.Join(rerr, err) } + if err != nil && + !errors.Is(err, http.ErrServerClosed) && + !errors.Is(err, grpc.ErrServerStopped) && + !errors.Is(err, context.Canceled) && + !errors.Is(err, context.DeadlineExceeded) { + rerr = errors.Join(rerr, err) + } err = sctx.Err() - if err != nil && err != context.Canceled { + if err != nil && + !errors.Is(err, context.Canceled) && + !errors.Is(err, context.DeadlineExceeded) { rerr = errors.Join(rerr, err) } diff --git a/internal/servers/servers.go b/internal/servers/servers.go index df6a98f311..e6616abbf2 100644 --- a/internal/servers/servers.go +++ b/internal/servers/servers.go @@ -63,14 +63,20 @@ func (l *listener) ListenAndServe(ctx context.Context) <-chan error { srv, ok := l.servers[name] if !ok || srv == nil { - ech <- errors.ErrServerNotFound(name) + select { + case <-ctx.Done(): + case ech <- errors.ErrServerNotFound(name): + } continue } if !l.servers[name].IsRunning() { err := l.servers[name].ListenAndServe(ctx, ech) if err != nil { - ech <- err + select { + case <-ctx.Done(): + case ech <- err: + } } } } @@ -79,7 +85,10 @@ func (l *listener) ListenAndServe(ctx context.Context) <-chan error { if !l.servers[name].IsRunning() { err := l.servers[name].ListenAndServe(ctx, ech) if err != nil { - ech <- err + select { + case <-ctx.Done(): + case ech <- err: + } } } } diff --git a/internal/servers/servers_test.go b/internal/servers/servers_test.go index 50d64ad3ff..d1ed6b6bba 100644 --- a/internal/servers/servers_test.go +++ b/internal/servers/servers_test.go @@ -154,7 +154,10 @@ func Test_listener_ListenAndServe(t *testing.T) { args: args{ ctx: func() context.Context { ctx, cancel := context.WithCancel(ctx) - defer cancel() + go func() { + defer cancel() + time.Sleep(time.Second) + }() return ctx }(), }, @@ -176,9 +179,8 @@ func Test_listener_ListenAndServe(t *testing.T) { } if len(werrs) != len(gerrs) { - return errors.Errorf("errors count is not equals: want: %v, got: %v", len(werrs), len(gerrs)) + return errors.Errorf("errors count is not equals: want: %v, got: %v", werrs, gerrs) } - for i := range werrs { if gerrs[i].Error() != werrs[i].Error() { return errors.Errorf("errors[%d] is not equals: want: %v, got: %v", i, werrs[i], gerrs[i]) diff --git a/internal/strings/strings.go b/internal/strings/strings.go index bbfc754948..b013e4e3b6 100644 --- a/internal/strings/strings.go +++ b/internal/strings/strings.go @@ -60,7 +60,6 @@ var ( SplitAfter = strings.SplitAfter SplitAfterN = strings.SplitAfterN SplitN = strings.SplitN - Title = strings.Title ToLower = strings.ToLower ToLowerSpecial = strings.ToLowerSpecial ToTitle = strings.ToTitle diff --git a/internal/sync/semaphore/semaphore_bench_test.go b/internal/sync/semaphore/semaphore_bench_test.go index 202ebaf025..3b86c19712 100644 --- a/internal/sync/semaphore/semaphore_bench_test.go +++ b/internal/sync/semaphore/semaphore_bench_test.go @@ -29,9 +29,6 @@ // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// -//go:build go1.7 -// +build go1.7 - package semaphore_test import ( diff --git a/internal/sync/singleflight/singleflight.go b/internal/sync/singleflight/singleflight.go index 2e41cdecb9..0af2ce5c28 100644 --- a/internal/sync/singleflight/singleflight.go +++ b/internal/sync/singleflight/singleflight.go @@ -104,7 +104,9 @@ type Result[V any] struct { // New returns Group implementation. func New[V any]() Group[V] { - return new(group[V]) + return &group[V]{ + m: make(map[string]*call[V]), + } } // Do executes and returns the results of the given function, making @@ -116,14 +118,10 @@ func (g *group[V]) Do( ctx context.Context, key string, fn func(context.Context) (V, error), ) (v V, shared bool, err error) { g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call[V]) - } if c, ok := g.m[key]; ok { g.mu.Unlock() atomic.AddUint64(&c.dups, 1) c.wg.Wait() - if e, ok := c.err.(*panicError); ok { panic(e) } else if c.err == errGoexit { @@ -149,9 +147,6 @@ func (g *group[V]) DoChan( ) <-chan Result[V] { ch := make(chan Result[V]) g.mu.Lock() - if g.m == nil { - g.m = make(map[string]*call[V]) - } if c, ok := g.m[key]; ok { c.dups++ c.chans = append(c.chans, ch) @@ -250,3 +245,22 @@ func (g *group[V]) Forget(key string) { delete(g.m, key) g.mu.Unlock() } + +// ForgetUnshared tells the singleflight to forget about a key if it is not +// shared with any other goroutines. Future calls to Do for a forgotten key +// will call the function rather than waiting for an earlier call to complete. +// Returns whether the key was forgotten or unknown--that is, whether no +// other goroutines are waiting for the result. +func (g *group[V]) ForgetUnshared(key string) bool { + g.mu.Lock() + defer g.mu.Unlock() + c, ok := g.m[key] + if !ok { + return true + } + if c.dups == 0 { + delete(g.m, key) + return true + } + return false +} diff --git a/internal/sync/singleflight/singleflight_test.go b/internal/sync/singleflight/singleflight_test.go index de3ed97eb8..28a46f4c20 100644 --- a/internal/sync/singleflight/singleflight_test.go +++ b/internal/sync/singleflight/singleflight_test.go @@ -57,7 +57,9 @@ func TestNew(t *testing.T) { { name: "returns Group implementation", want: want{ - want: &group[any]{}, + want: &group[any]{ + m: make(map[string]*call[any]), + }, }, }, } @@ -638,4 +640,52 @@ func ExampleGroup() { // Result: func 1 } +func TestDoTimeout(t *testing.T) { + g := New[string]() + start := time.Now() + v, _, err := g.Do(context.Background(), "key", func(context.Context) (string, error) { + time.Sleep(100 * time.Millisecond) + return "bar", nil + }) + if err != nil { + t.Errorf("Do error: %v", err) + } + if v != "bar" { + t.Errorf("Do = %s; want %s", v, "bar") + } + if time.Since(start) < 100*time.Millisecond { + t.Errorf("Do executed too quickly; expected delay") + } +} + +func TestDoMultipleErrors(t *testing.T) { + g := New[string]() + var calls int32 + someErr := errors.New("Some error") + + const n = 10 + var wg sync.WaitGroup + for i := 0; i < n; i++ { + wg.Add(1) + go func() { + defer wg.Done() + v, _, err := g.Do(context.Background(), "key", func(context.Context) (string, error) { + atomic.AddInt32(&calls, 1) + time.Sleep(10 * time.Millisecond) + return "", someErr + }) + if err != someErr { + t.Errorf("Do error = %v; want %v", err, someErr) + } + if v != "" { + t.Errorf("Do = %v; want empty string", v) + } + }() + } + wg.Wait() + if got := atomic.LoadInt32(&calls); got != 1 { + t.Errorf("number of calls = %d; want 1", got) + } +} + // NOT IMPLEMENTED BELOW diff --git 
a/internal/test/data/vector/gen_test.go b/internal/test/data/vector/gen_test.go index 77c8543215..0f279a5610 100644 --- a/internal/test/data/vector/gen_test.go +++ b/internal/test/data/vector/gen_test.go @@ -47,8 +47,13 @@ func TestFloat32VectorGenerator(t *testing.T) { } if got != nil { vectors := got(a.n, a.dim) - if len(vectors) != w.n && len(vectors[0]) != w.dim { - return errors.Errorf("got: \"%d\",\"%d\"\n\t\t\t\twant: \"%d\",\"%d\"", len(vectors), len(vectors[0]), w.n, w.dim) + if len(vectors) != w.n { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twantLen: \"%#v\"", len(vectors), w.n) + } + for _, vec := range vectors { + if len(vec) != w.dim { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twantDim: \"%#v\"", len(vec), w.dim) + } } } return nil @@ -140,8 +145,13 @@ func TestUint8VectorGenerator(t *testing.T) { } if got != nil { vectors := got(a.n, a.dim) - if len(vectors) != w.n && len(vectors[0]) != w.dim { - return errors.Errorf("got: \"%d\",\"%d\"\n\t\t\t\twant: \"%d\",\"%d\"", len(vectors), len(vectors[0]), w.n, w.dim) + if len(vectors) != w.n { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twantLen: \"%#v\"", len(vectors), w.n) + } + for _, vec := range vectors { + if len(vec) != w.dim { + return errors.Errorf("got: \"%#v\",\n\t\t\t\twantDim: \"%#v\"", len(vec), w.dim) + } } } return nil diff --git a/internal/test/mock/grpc/grpc_client_mock.go b/internal/test/mock/grpc/grpc_client_mock.go index 11b9097cf6..078a409d9d 100644 --- a/internal/test/mock/grpc/grpc_client_mock.go +++ b/internal/test/mock/grpc/grpc_client_mock.go @@ -30,10 +30,11 @@ type GRPCClientMock struct { addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error) error - ConnectFunc func(ctx context.Context, addr string, dopts ...grpc.DialOption) (pool.Conn, error) - DisconnectFunc func(ctx context.Context, addr string) error - IsConnectedFunc func(ctx context.Context, addr string) bool - ConnectedAddrsFunc func() []string + ConnectFunc func(ctx context.Context, addr string, dopts ...grpc.DialOption) (pool.Conn, error) + DisconnectFunc func(ctx context.Context, addr string) error + IsConnectedFunc func(ctx context.Context, addr string) bool + ConnectedAddrsFunc func() []string + SetDisableResolveDNSAddrFunc func(addr string, disabled bool) } // OrderedRangeConcurrent calls the OrderedRangeConcurrentFunc object. @@ -70,3 +71,8 @@ func (gc *GRPCClientMock) Disconnect(ctx context.Context, addr string) error { func (gc *GRPCClientMock) IsConnected(ctx context.Context, addr string) bool { return gc.IsConnectedFunc(ctx, addr) } + +// SetDisableResolveDNSAddr calls the SetDisableResolveDNSAddr object. +func (gc *GRPCClientMock) SetDisableResolveDNSAddr(addr string, disabled bool) { + gc.SetDisableResolveDNSAddrFunc(addr, disabled) +} diff --git a/internal/test/mock/grpc_testify_mock.go b/internal/test/mock/grpc_testify_mock.go index ce9c81b108..20e215d29f 100644 --- a/internal/test/mock/grpc_testify_mock.go +++ b/internal/test/mock/grpc_testify_mock.go @@ -213,3 +213,5 @@ func (c *ClientInternal) Close(ctx context.Context) error { args := c.Called(ctx) return args.Error(0) } + +func (c *ClientInternal) SetDisableResolveDNSAddr(addr string, distributed bool) {} diff --git a/internal/tls/tls.go b/internal/tls/tls.go index 74254ab391..be150d4b35 100644 --- a/internal/tls/tls.go +++ b/internal/tls/tls.go @@ -49,7 +49,7 @@ var ( // NewTLSConfig returns a *tls.Config struct or error // This function read TLS configuration and initialize *tls.Config struct. 
// This function initialize TLS configuration, for example the CA certificate and key to start TLS server. -// Server and CA Certificate, and private key will read from a file from the file path definied in environment variable. +// Server and CA Certificate, and private key will read from a file from the file path defined in environment variable. func New(opts ...Option) (*Config, error) { c, err := newCredential(opts...) if err != nil { diff --git a/internal/worker/queue.go b/internal/worker/queue.go index 8a26146985..ff065e305d 100644 --- a/internal/worker/queue.go +++ b/internal/worker/queue.go @@ -64,8 +64,8 @@ func NewQueue(opts ...QueueOption) (Queue, error) { return q, nil } -// Start starts execute queueing if queue is not runnnig. -// If queue is already reunning, it returns error. +// Start starts execute queueing if queue is not running. +// If queue is already running, it returns error. // It returns the error channel that the queueing job return. func (q *queue) Start(ctx context.Context) (<-chan error, error) { if q.isRunning() { @@ -132,7 +132,7 @@ func (q *queue) Push(ctx context.Context, job JobFunc) error { } } -// Pop returns (JobFunc, nil) if the channnel, which will be used for queuing job, contains JobFunc. +// Pop returns (JobFunc, nil) if the channel, which will be used for queuing job, contains JobFunc. // It returns (nil ,error) if it failed to pop from the job queue. func (q *queue) Pop(ctx context.Context) (JobFunc, error) { tryCnt := int(q.Len()) + 1 // include the first try diff --git a/internal/worker/queue_option.go b/internal/worker/queue_option.go index a0ccbaf27f..84a56c1629 100644 --- a/internal/worker/queue_option.go +++ b/internal/worker/queue_option.go @@ -52,7 +52,7 @@ func WithQueueErrGroup(eg errgroup.Group) QueueOption { } // WithQueueCheckDuration returns the option to set the qcdur for queue. -// If dur is invalid string, it returns errror. +// If dur is invalid string, it returns error. 
func WithQueueCheckDuration(dur string) QueueOption { return func(q *queue) error { if len(dur) == 0 { diff --git a/k8s/agent/ngt/configmap.yaml b/k8s/agent/ngt/configmap.yaml index f864cf1813..c05b861686 100644 --- a/k8s/agent/ngt/configmap.yaml +++ b/k8s/agent/ngt/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-agent-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp diff --git a/k8s/agent/pdb.yaml b/k8s/agent/pdb.yaml index 6560ab3114..bed8ec4ac5 100644 --- a/k8s/agent/pdb.yaml +++ b/k8s/agent/pdb.yaml @@ -19,13 +19,14 @@ metadata: name: vald-agent labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent spec: maxUnavailable: 1 selector: matchLabels: app: vald-agent + unhealthyPodEvictionPolicy: IfHealthyBudget diff --git a/k8s/agent/priorityclass.yaml b/k8s/agent/priorityclass.yaml index 73b296eb43..d5a0f0f31c 100644 --- a/k8s/agent/priorityclass.yaml +++ b/k8s/agent/priorityclass.yaml @@ -19,10 +19,10 @@ metadata: name: default-vald-agent-priority labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent value: 1e+09 preemptionPolicy: Never diff --git a/k8s/agent/statefulset.yaml b/k8s/agent/statefulset.yaml index 32d2e342fb..a271e801e4 100644 --- a/k8s/agent/statefulset.yaml +++ b/k8s/agent/statefulset.yaml @@ -20,10 +20,10 @@ metadata: labels: app: vald-agent app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent spec: serviceName: vald-agent diff --git a/k8s/agent/svc.yaml b/k8s/agent/svc.yaml index bfa8aeda2e..8b10c293a3 100644 --- a/k8s/agent/svc.yaml +++ b/k8s/agent/svc.yaml @@ -19,10 +19,10 @@ metadata: name: vald-agent labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent spec: ports: diff --git a/k8s/discoverer/clusterrole.yaml b/k8s/discoverer/clusterrole.yaml index 070d1d5fe7..4a0f8cfb35 100644 --- a/k8s/discoverer/clusterrole.yaml +++ b/k8s/discoverer/clusterrole.yaml @@ -19,10 +19,10 @@ metadata: name: 
discoverer labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer rules: - apiGroups: diff --git a/k8s/discoverer/clusterrolebinding.yaml b/k8s/discoverer/clusterrolebinding.yaml index 0f59b5dc9c..652ddd9967 100644 --- a/k8s/discoverer/clusterrolebinding.yaml +++ b/k8s/discoverer/clusterrolebinding.yaml @@ -19,10 +19,10 @@ metadata: name: discoverer labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer roleRef: apiGroup: rbac.authorization.k8s.io diff --git a/k8s/discoverer/configmap.yaml b/k8s/discoverer/configmap.yaml index cdae655d30..07f4379534 100644 --- a/k8s/discoverer/configmap.yaml +++ b/k8s/discoverer/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-discoverer-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -264,6 +269,7 @@ data: cache_enabled: true cache_expiration: 24h refresh_duration: 5m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false diff --git a/k8s/discoverer/deployment.yaml b/k8s/discoverer/deployment.yaml index c680a4c54f..3845cbda17 100644 --- a/k8s/discoverer/deployment.yaml +++ b/k8s/discoverer/deployment.yaml @@ -20,10 +20,10 @@ metadata: labels: app: vald-discoverer app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer spec: progressDeadlineSeconds: 600 @@ -46,7 +46,7 @@ spec: app.kubernetes.io/instance: release-name app.kubernetes.io/component: discoverer annotations: - checksum/configmap: 2ca8f5721cdc6f8582f3701cf8ed2a34de7732052bd54bce90880889025d50d1 + checksum/configmap: ef110d19d3dfcbad8873d75c1cf75e8dc2b37ffeb92ecbf783a12d119f032013 profefe.com/enable: "true" profefe.com/port: "6060" profefe.com/service: vald-discoverer diff --git a/k8s/discoverer/pdb.yaml b/k8s/discoverer/pdb.yaml index 470e173306..48291000b3 100644 --- a/k8s/discoverer/pdb.yaml +++ b/k8s/discoverer/pdb.yaml @@ -19,13 +19,14 @@ metadata: name: vald-discoverer labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + 
app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer spec: maxUnavailable: 50% selector: matchLabels: app: vald-discoverer + unhealthyPodEvictionPolicy: AlwaysAllow diff --git a/k8s/discoverer/priorityclass.yaml b/k8s/discoverer/priorityclass.yaml index 1de844a01e..2080b4f3ab 100644 --- a/k8s/discoverer/priorityclass.yaml +++ b/k8s/discoverer/priorityclass.yaml @@ -19,10 +19,10 @@ metadata: name: default-vald-discoverer-priority labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer value: 1e+06 globalDefault: false diff --git a/k8s/discoverer/serviceaccount.yaml b/k8s/discoverer/serviceaccount.yaml index 0d9f022c49..a3b0b1e8a8 100644 --- a/k8s/discoverer/serviceaccount.yaml +++ b/k8s/discoverer/serviceaccount.yaml @@ -19,8 +19,8 @@ metadata: name: vald labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer diff --git a/k8s/discoverer/svc.yaml b/k8s/discoverer/svc.yaml index 1702f96219..34873558f7 100644 --- a/k8s/discoverer/svc.yaml +++ b/k8s/discoverer/svc.yaml @@ -19,10 +19,10 @@ metadata: name: vald-discoverer labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: discoverer spec: ports: diff --git a/k8s/gateway/gateway/ing.yaml b/k8s/gateway/gateway/ing.yaml index 3e0c101ac4..2a7a3de2c2 100644 --- a/k8s/gateway/gateway/ing.yaml +++ b/k8s/gateway/gateway/ing.yaml @@ -22,10 +22,10 @@ metadata: name: vald-lb-gateway-ingress app: vald-lb-gateway-ingress app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb name: release-name-ingress spec: @@ -38,7 +38,22 @@ spec: - host: lb.gateway.vald.vdaas.org http: paths: - - backend: + - path: "/" + backend: + service: + name: vald-lb-gateway + port: + name: grpc + pathType: ImplementationSpecific + - path: "/grpc.reflection.v1alpha.ServerReflection/ServerReflectionInfo" + backend: + service: + name: vald-lb-gateway + port: + name: grpc + pathType: ImplementationSpecific + - path: "/grpc.reflection.v1.ServerReflection/ServerReflectionInfo" + backend: service: name: vald-lb-gateway port: diff --git a/k8s/gateway/gateway/lb/configmap.yaml b/k8s/gateway/gateway/lb/configmap.yaml index db958a0ca1..7d588d985d 100644 --- a/k8s/gateway/gateway/lb/configmap.yaml +++ b/k8s/gateway/gateway/lb/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-lb-gateway-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: 
"" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -266,16 +271,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -284,6 +293,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -295,6 +306,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -312,7 +324,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -344,16 +358,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -362,6 +380,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -373,6 +393,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -390,7 +411,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca diff --git a/k8s/gateway/gateway/lb/deployment.yaml b/k8s/gateway/gateway/lb/deployment.yaml index 306ed07af8..8af6125d56 100644 --- a/k8s/gateway/gateway/lb/deployment.yaml +++ b/k8s/gateway/gateway/lb/deployment.yaml @@ -20,10 +20,10 @@ metadata: labels: app: vald-lb-gateway app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb spec: progressDeadlineSeconds: 600 @@ -45,7 +45,7 @@ spec: app.kubernetes.io/instance: release-name app.kubernetes.io/component: gateway-lb annotations: - checksum/configmap: 39f04e0b0c8ba58e4abaa66146b382efd4c3b9349019967d926267ecafed7b37 + checksum/configmap: cedf74497a9cad5a836d57d70e151d02128fdeba1c51ecad321c80163ee010a3 profefe.com/enable: "true" profefe.com/port: "6060" profefe.com/service: vald-lb-gateway @@ -58,6 +58,7 @@ spec: initContainers: - name: wait-for-discoverer image: busybox:stable + imagePullPolicy: 
Always command: - /bin/sh - -e @@ -69,6 +70,7 @@ spec: done - name: wait-for-agent image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e diff --git a/k8s/gateway/gateway/lb/hpa.yaml b/k8s/gateway/gateway/lb/hpa.yaml index 21f359e7c5..4fe77a2997 100644 --- a/k8s/gateway/gateway/lb/hpa.yaml +++ b/k8s/gateway/gateway/lb/hpa.yaml @@ -19,10 +19,10 @@ metadata: name: vald-lb-gateway labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb spec: maxReplicas: 9 diff --git a/k8s/gateway/gateway/lb/pdb.yaml b/k8s/gateway/gateway/lb/pdb.yaml index 314c771205..d05d1503f0 100644 --- a/k8s/gateway/gateway/lb/pdb.yaml +++ b/k8s/gateway/gateway/lb/pdb.yaml @@ -19,13 +19,14 @@ metadata: name: vald-lb-gateway labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb spec: maxUnavailable: 50% selector: matchLabels: app: vald-lb-gateway + unhealthyPodEvictionPolicy: AlwaysAllow diff --git a/k8s/gateway/gateway/lb/priorityclass.yaml b/k8s/gateway/gateway/lb/priorityclass.yaml index 8cb2eecb6b..677dd656dd 100644 --- a/k8s/gateway/gateway/lb/priorityclass.yaml +++ b/k8s/gateway/gateway/lb/priorityclass.yaml @@ -19,10 +19,10 @@ metadata: name: default-vald-lb-gateway-priority labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb value: 1e+06 globalDefault: false diff --git a/k8s/gateway/gateway/lb/svc.yaml b/k8s/gateway/gateway/lb/svc.yaml index 3c36aa6f27..26a005fae8 100644 --- a/k8s/gateway/gateway/lb/svc.yaml +++ b/k8s/gateway/gateway/lb/svc.yaml @@ -19,10 +19,10 @@ metadata: name: vald-lb-gateway labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: gateway-lb spec: ports: diff --git a/k8s/index/job/correction/configmap.yaml b/k8s/index/job/correction/configmap.yaml index f13b88f419..20ebc26306 100644 --- a/k8s/index/job/correction/configmap.yaml +++ b/k8s/index/job/correction/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-index-correction-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: vald-index-correction data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + 
shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -266,16 +271,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -284,6 +293,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -295,6 +306,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -312,7 +324,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -347,16 +361,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -365,6 +383,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -376,6 +396,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -393,7 +414,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -425,6 +448,7 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 diff --git a/k8s/index/job/correction/cronjob.yaml b/k8s/index/job/correction/cronjob.yaml index 1aead0dd97..1e141dad6f 100644 --- a/k8s/index/job/correction/cronjob.yaml +++ b/k8s/index/job/correction/cronjob.yaml @@ -20,11 +20,11 @@ metadata: labels: app: vald-index-correction app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-correction - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 spec: schedule: "6 3 * * *" concurrencyPolicy: Forbid @@ -38,11 +38,11 @@ spec: labels: app: vald-index-correction app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-correction - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 annotations: pyroscope.io/scrape: "true" pyroscope.io/application-name: vald-index-correction @@ -53,6 +53,7 @@ spec: initContainers: - name: wait-for-agent image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -64,6 +65,7 @@ spec: done - name: wait-for-discoverer image: 
busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -73,6 +75,15 @@ spec: echo "waiting for discoverer to be ready..." sleep 2; done + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] containers: - name: vald-index-correction image: "vdaas/vald-index-correction:nightly" diff --git a/k8s/index/job/creation/configmap.yaml b/k8s/index/job/creation/configmap.yaml index 49c14c8419..2e05a9d6f6 100644 --- a/k8s/index/job/creation/configmap.yaml +++ b/k8s/index/job/creation/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-index-creation-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: vald-index-creation data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -266,16 +271,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -284,6 +293,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -295,6 +306,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -312,7 +324,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -344,6 +358,7 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 diff --git a/k8s/index/job/creation/cronjob.yaml b/k8s/index/job/creation/cronjob.yaml index 72336e75a4..c3fb8546f9 100644 --- a/k8s/index/job/creation/cronjob.yaml +++ b/k8s/index/job/creation/cronjob.yaml @@ -20,11 +20,11 @@ metadata: labels: app: vald-index-creation app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-creation - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 spec: schedule: "* * * * *" concurrencyPolicy: Forbid @@ -38,11 +38,11 @@ spec: labels: app: vald-index-creation 
app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-creation - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 annotations: pyroscope.io/scrape: "true" pyroscope.io/application-name: vald-index-creation @@ -53,6 +53,7 @@ spec: initContainers: - name: wait-for-agent image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -64,6 +65,7 @@ spec: done - name: wait-for-discoverer image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -73,6 +75,15 @@ spec: echo "waiting for discoverer to be ready..." sleep 2; done + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] containers: - name: vald-index-creation image: "vdaas/vald-index-creation:nightly" diff --git a/k8s/index/job/save/configmap.yaml b/k8s/index/job/save/configmap.yaml index 0a243704f8..fcb2cd929a 100644 --- a/k8s/index/job/save/configmap.yaml +++ b/k8s/index/job/save/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-index-save-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: vald-index-save data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -266,16 +271,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -284,6 +293,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -295,6 +306,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -312,7 +324,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -344,6 +358,7 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 diff --git a/k8s/index/job/save/cronjob.yaml b/k8s/index/job/save/cronjob.yaml index 5672e0319a..613d4f00fe 100644 --- 
a/k8s/index/job/save/cronjob.yaml +++ b/k8s/index/job/save/cronjob.yaml @@ -20,11 +20,11 @@ metadata: labels: app: vald-index-save app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-save - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 spec: schedule: "0 */3 * * *" concurrencyPolicy: Forbid @@ -38,11 +38,11 @@ spec: labels: app: vald-index-save app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name app.kubernetes.io/component: vald-index-save - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 annotations: pyroscope.io/scrape: "true" pyroscope.io/application-name: vald-index-save @@ -53,6 +53,7 @@ spec: initContainers: - name: wait-for-agent image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -64,6 +65,7 @@ spec: done - name: wait-for-discoverer image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -73,6 +75,15 @@ spec: echo "waiting for discoverer to be ready..." sleep 2; done + affinity: + nodeAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + podAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: [] + requiredDuringSchedulingIgnoredDuringExecution: [] containers: - name: vald-index-save image: "vdaas/vald-index-save:nightly" diff --git a/k8s/index/operator/configmap.yaml b/k8s/index/operator/configmap.yaml index 59724a691c..f243edfa5f 100644 --- a/k8s/index/operator/configmap.yaml +++ b/k8s/index/operator/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-index-operator-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: index-operator data: - config.yaml: "---\nversion: v0.0.0\ntime_zone: UTC\nlogging:\n format: raw\n level: debug\n logger: glg\nserver_config:\n servers:\n - name: grpc\n host: 0.0.0.0\n port: 8081\n grpc:\n bidirectional_stream_concurrency: 20\n connection_timeout: \"\"\n enable_admin: true\n enable_reflection: true\n header_table_size: 0\n initial_conn_window_size: 2097152\n initial_window_size: 1048576\n interceptors:\n - RecoverInterceptor\n keepalive:\n max_conn_age: \"\"\n max_conn_age_grace: \"\"\n max_conn_idle: \"\"\n min_time: 10m\n permit_without_stream: false\n time: 3h\n timeout: 60s\n max_header_list_size: 0\n max_receive_message_size: 0\n max_send_message_size: 0\n read_buffer_size: 0\n write_buffer_size: 0\n mode: GRPC\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n tcp_defer_accept: false\n tcp_fast_open: false\n tcp_no_delay: false\n tcp_quick_ack: false\n socket_path: \"\"\n health_check_servers:\n - name: liveness\n host: 0.0.0.0\n port: 3000\n http:\n handler_timeout: \"\"\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n 
max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: \"\"\n read_header_timeout: \"\"\n read_timeout: \"\"\n shutdown_duration: 5s\n write_timeout: \"\"\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n tcp_defer_accept: false\n tcp_fast_open: true\n tcp_no_delay: true\n tcp_quick_ack: true\n socket_path: \"\"\n - name: readiness\n host: 0.0.0.0\n port: 3001\n http:\n handler_timeout: \"\"\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: \"\"\n read_header_timeout: \"\"\n read_timeout: \"\"\n shutdown_duration: 0s\n write_timeout: \"\"\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n tcp_defer_accept: false\n tcp_fast_open: true\n tcp_no_delay: true\n tcp_quick_ack: true\n socket_path: \"\"\n metrics_servers:\n - name: pprof\n host: 0.0.0.0\n port: 6060\n http:\n handler_timeout: 5s\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: 2s\n read_header_timeout: 1s\n read_timeout: 1s\n shutdown_duration: 5s\n write_timeout: 1m\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: true\n tcp_defer_accept: false\n tcp_fast_open: false\n tcp_no_delay: false\n tcp_quick_ack: false\n socket_path: \"\"\n startup_strategy:\n - liveness\n - pprof\n - grpc\n - readiness\n shutdown_strategy:\n - readiness\n - grpc\n - pprof\n - liveness\n full_shutdown_duration: 600s\n tls:\n ca: /path/to/ca\n cert: /path/to/cert\n enabled: false\n insecure_skip_verify: false\n key: /path/to/key\nobservability:\n enabled: false\n otlp:\n collector_endpoint: \"\"\n trace_batch_timeout: \"1s\"\n trace_export_timeout: \"1m\"\n trace_max_export_batch_size: 1024\n trace_max_queue_size: 256\n metrics_export_interval: \"1s\"\n metrics_export_timeout: \"1m\"\n attribute:\n namespace: \"_MY_POD_NAMESPACE_\"\n pod_name: \"_MY_POD_NAME_\"\n node_name: \"_MY_NODE_NAME_\"\n service_name: \"vald-index-operator\"\n metrics:\n enable_cgo: true\n enable_goroutine: true\n enable_memory: true\n enable_version_info: true\n version_info_labels:\n - vald_version\n - server_name\n - git_commit\n - build_time\n - go_version\n - go_os\n - go_arch\n - algorithm_info\n trace:\n enabled: false\noperator:\n namespace: _MY_POD_NAMESPACE_\n agent_name: vald-agent\n agent_namespace: \n rotator_name: vald-readreplica-rotate\n target_read_replica_id_annotations_key: vald.vdaas.org/target-read-replica-id\n rotation_job_concurrency: 2\n read_replica_enabled: false\n read_replica_label_key: vald-readreplica-id\n job_templates:\n rotate:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-readreplica-rotate\n labels:\n app: vald-readreplica-rotate\n app.kubernetes.io/name: vald\n 
helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-readreplica-rotate\n app.kubernetes.io/version: v1.7.13\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-readreplica-rotate\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-readreplica-rotate\n app.kubernetes.io/version: v1.7.13\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-readreplica-rotate\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n containers:\n - name: vald-readreplica-rotate\n image: \"vdaas/vald-readreplica-rotate:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-readreplica-rotate-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n privileged: false\n readOnlyRootFilesystem: true\n runAsGroup: 65532\n runAsNonRoot: true\n runAsUser: 65532\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: TARGET_READREPLICA_ID_RELEASE_NAME_DEFAULT_VALD\n valueFrom:\n fieldRef:\n fieldPath: metadata.annotations['vald.vdaas.org/target-read-replica-id']\n securityContext:\n fsGroup: 65532\n fsGroupChangePolicy: OnRootMismatch\n runAsGroup: 65532\n runAsNonRoot: true\n runAsUser: 65532\n restartPolicy: OnFailure\n volumes:\n - name: vald-readreplica-rotate-config\n configMap:\n defaultMode: 420\n name: vald-readreplica-rotate-config\n serviceAccountName: vald-readreplica-rotate\n creation:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-creation\n labels:\n app: vald-index-creation\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-creation\n app.kubernetes.io/version: v1.7.13\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-creation\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-creation\n app.kubernetes.io/version: v1.7.13\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-creation\n pyroscope.io/profile-cpu-enabled: \"true\"\n 
pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n containers:\n - name: vald-index-creation\n image: \"vdaas/vald-index-creation:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-creation-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-creation-config\n configMap:\n defaultMode: 420\n name: vald-index-creation-config\n save:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-save\n labels:\n app: vald-index-save\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-save\n app.kubernetes.io/version: v1.7.13\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-save\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-save\n app.kubernetes.io/version: v1.7.13\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-save\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 
| awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n containers:\n - name: vald-index-save\n image: \"vdaas/vald-index-save:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-save-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-save-config\n configMap:\n defaultMode: 420\n name: vald-index-save-config\n correction:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-correction\n labels:\n app: vald-index-correction\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-correction\n app.kubernetes.io/version: v1.7.13\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-correction\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.13\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-correction\n app.kubernetes.io/version: v1.7.13\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-correction\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n containers:\n - name: vald-index-correction\n image: \"vdaas/vald-index-correction:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-correction-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n 
initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-correction-config\n configMap:\n defaultMode: 420\n name: vald-index-correction-config\n" + config.yaml: "---\nversion: v0.0.0\ntime_zone: UTC\nlogging:\n format: raw\n level: debug\n logger: glg\nserver_config:\n servers:\n - name: grpc\n host: 0.0.0.0\n port: 8081\n grpc:\n bidirectional_stream_concurrency: 20\n connection_timeout: \"\"\n enable_admin: true\n enable_channelz: true\n enable_reflection: true\n header_table_size: 0\n initial_conn_window_size: 2097152\n initial_window_size: 1048576\n interceptors:\n - RecoverInterceptor\n keepalive:\n max_conn_age: \"\"\n max_conn_age_grace: \"\"\n max_conn_idle: \"\"\n min_time: 10m\n permit_without_stream: false\n time: 3h\n timeout: 60s\n max_concurrent_streams: 0\n max_header_list_size: 0\n max_receive_message_size: 0\n max_send_message_size: 0\n num_stream_workers: 0\n read_buffer_size: 0\n shared_write_buffer: false\n wait_for_handlers: true\n write_buffer_size: 0\n mode: GRPC\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n tcp_defer_accept: false\n tcp_fast_open: false\n tcp_no_delay: false\n tcp_quick_ack: false\n socket_path: \"\"\n health_check_servers:\n - name: liveness\n host: 0.0.0.0\n port: 3000\n http:\n handler_timeout: \"\"\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: \"\"\n read_header_timeout: \"\"\n read_timeout: \"\"\n shutdown_duration: 5s\n write_timeout: \"\"\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n tcp_defer_accept: false\n tcp_fast_open: true\n tcp_no_delay: true\n tcp_quick_ack: true\n socket_path: \"\"\n - name: readiness\n host: 0.0.0.0\n port: 3001\n http:\n handler_timeout: \"\"\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: \"\"\n read_header_timeout: \"\"\n read_timeout: \"\"\n shutdown_duration: 0s\n write_timeout: \"\"\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: false\n 
tcp_defer_accept: false\n tcp_fast_open: true\n tcp_no_delay: true\n tcp_quick_ack: true\n socket_path: \"\"\n metrics_servers:\n - name: pprof\n host: 0.0.0.0\n port: 6060\n http:\n handler_timeout: 5s\n http2:\n enabled: false\n handler_limit: 0\n max_concurrent_streams: 0\n max_decoder_header_table_size: 4096\n max_encoder_header_table_size: 4096\n max_read_frame_size: 0\n max_upload_buffer_per_connection: 0\n max_upload_buffer_per_stream: 0\n permit_prohibited_cipher_suites: true\n idle_timeout: 2s\n read_header_timeout: 1s\n read_timeout: 1s\n shutdown_duration: 5s\n write_timeout: 1m\n mode: REST\n network: tcp\n probe_wait_time: 3s\n restart: true\n socket_option:\n ip_recover_destination_addr: false\n ip_transparent: false\n reuse_addr: true\n reuse_port: true\n tcp_cork: true\n tcp_defer_accept: false\n tcp_fast_open: false\n tcp_no_delay: false\n tcp_quick_ack: false\n socket_path: \"\"\n startup_strategy:\n - liveness\n - pprof\n - grpc\n - readiness\n shutdown_strategy:\n - readiness\n - grpc\n - pprof\n - liveness\n full_shutdown_duration: 600s\n tls:\n ca: /path/to/ca\n cert: /path/to/cert\n enabled: false\n insecure_skip_verify: false\n key: /path/to/key\nobservability:\n enabled: false\n otlp:\n collector_endpoint: \"\"\n trace_batch_timeout: \"1s\"\n trace_export_timeout: \"1m\"\n trace_max_export_batch_size: 1024\n trace_max_queue_size: 256\n metrics_export_interval: \"1s\"\n metrics_export_timeout: \"1m\"\n attribute:\n namespace: \"_MY_POD_NAMESPACE_\"\n pod_name: \"_MY_POD_NAME_\"\n node_name: \"_MY_NODE_NAME_\"\n service_name: \"vald-index-operator\"\n metrics:\n enable_cgo: true\n enable_goroutine: true\n enable_memory: true\n enable_version_info: true\n version_info_labels:\n - vald_version\n - server_name\n - git_commit\n - build_time\n - go_version\n - go_os\n - go_arch\n - algorithm_info\n trace:\n enabled: false\noperator:\n namespace: _MY_POD_NAMESPACE_\n agent_name: vald-agent\n agent_namespace: \n rotator_name: vald-readreplica-rotate\n target_read_replica_id_annotations_key: vald.vdaas.org/target-read-replica-id\n rotation_job_concurrency: 2\n read_replica_enabled: false\n read_replica_label_key: vald-readreplica-id\n job_templates:\n rotate:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-readreplica-rotate\n labels:\n app: vald-readreplica-rotate\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-readreplica-rotate\n app.kubernetes.io/version: v1.7.16\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-readreplica-rotate\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-readreplica-rotate\n app.kubernetes.io/version: v1.7.16\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-readreplica-rotate\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n containers:\n - name: vald-readreplica-rotate\n image: \"vdaas/vald-readreplica-rotate:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-readreplica-rotate-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n 
failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n securityContext:\n allowPrivilegeEscalation: false\n capabilities:\n drop:\n - ALL\n privileged: false\n readOnlyRootFilesystem: true\n runAsGroup: 65532\n runAsNonRoot: true\n runAsUser: 65532\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n - name: TARGET_READREPLICA_ID_RELEASE_NAME_DEFAULT_VALD\n valueFrom:\n fieldRef:\n fieldPath: metadata.annotations['vald.vdaas.org/target-read-replica-id']\n securityContext:\n fsGroup: 65532\n fsGroupChangePolicy: OnRootMismatch\n runAsGroup: 65532\n runAsNonRoot: true\n runAsUser: 65532\n restartPolicy: OnFailure\n volumes:\n - name: vald-readreplica-rotate-config\n configMap:\n defaultMode: 420\n name: vald-readreplica-rotate-config\n serviceAccountName: vald-readreplica-rotate\n creation:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-creation\n labels:\n app: vald-index-creation\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-creation\n app.kubernetes.io/version: v1.7.16\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-creation\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-creation\n app.kubernetes.io/version: v1.7.16\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-creation\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n podAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n containers:\n - name: vald-index-creation\n image: 
\"vdaas/vald-index-creation:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-creation-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-creation-config\n configMap:\n defaultMode: 420\n name: vald-index-creation-config\n save:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-save\n labels:\n app: vald-index-save\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-save\n app.kubernetes.io/version: v1.7.16\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-save\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-save\n app.kubernetes.io/version: v1.7.16\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-save\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n podAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n containers:\n - name: vald-index-save\n image: \"vdaas/vald-index-save:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-save-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n 
initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-save-config\n configMap:\n defaultMode: 420\n name: vald-index-save-config\n correction:\n apiVersion: batch/v1\n kind: Job\n metadata:\n name: vald-index-correction\n labels:\n app: vald-index-correction\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-correction\n app.kubernetes.io/version: v1.7.16\n spec:\n ttlSecondsAfterFinished: 86400\n template:\n metadata:\n labels:\n app: vald-index-correction\n app.kubernetes.io/name: vald\n helm.sh/chart: vald-v1.7.16\n app.kubernetes.io/managed-by: Helm\n app.kubernetes.io/instance: release-name\n app.kubernetes.io/component: vald-index-correction\n app.kubernetes.io/version: v1.7.16\n annotations:\n pyroscope.io/scrape: \"true\"\n pyroscope.io/application-name: vald-index-correction\n pyroscope.io/profile-cpu-enabled: \"true\"\n pyroscope.io/profile-mem-enabled: \"true\"\n pyroscope.io/port: \"6060\"\n spec:\n initContainers:\n - name: wait-for-agent\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-agent.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for agent to be ready...\"\n sleep 2;\n done\n - name: wait-for-discoverer\n image: busybox:stable\n imagePullPolicy: Always\n command:\n - /bin/sh\n - -e\n - -c\n - |\n until [ \"$(wget --server-response --spider --quiet http://vald-discoverer.default.svc.cluster.local:3001/readiness 2>&1 | awk 'NR==1{print $2}')\" == \"200\" ]; do\n echo \"waiting for discoverer to be ready...\"\n sleep 2;\n done\n affinity:\n nodeAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n podAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n podAntiAffinity:\n preferredDuringSchedulingIgnoredDuringExecution: []\n requiredDuringSchedulingIgnoredDuringExecution: []\n containers:\n - name: vald-index-correction\n image: \"vdaas/vald-index-correction:nightly\"\n imagePullPolicy: Always\n volumeMounts:\n - name: vald-index-correction-config\n mountPath: /etc/server/\n livenessProbe:\n failureThreshold: 2\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n readinessProbe:\n failureThreshold: 2\n httpGet:\n path: /readiness\n port: readiness\n scheme: HTTP\n initialDelaySeconds: 
10\n periodSeconds: 3\n successThreshold: 1\n timeoutSeconds: 2\n startupProbe:\n failureThreshold: 30\n httpGet:\n path: /liveness\n port: liveness\n scheme: HTTP\n initialDelaySeconds: 5\n periodSeconds: 5\n successThreshold: 1\n timeoutSeconds: 2\n ports:\n - name: liveness\n protocol: TCP\n containerPort: 3000\n - name: readiness\n protocol: TCP\n containerPort: 3001\n - name: grpc\n protocol: TCP\n containerPort: 8081\n - name: pprof\n protocol: TCP\n containerPort: 6060\n env:\n - name: MY_NODE_NAME\n valueFrom:\n fieldRef:\n fieldPath: spec.nodeName\n - name: MY_POD_NAME\n valueFrom:\n fieldRef:\n fieldPath: metadata.name\n - name: MY_POD_NAMESPACE\n valueFrom:\n fieldRef:\n fieldPath: metadata.namespace\n restartPolicy: OnFailure\n volumes:\n - name: vald-index-correction-config\n configMap:\n defaultMode: 420\n name: vald-index-correction-config\n" diff --git a/k8s/index/operator/deployment.yaml b/k8s/index/operator/deployment.yaml index a70308c6b0..e040e900f7 100644 --- a/k8s/index/operator/deployment.yaml +++ b/k8s/index/operator/deployment.yaml @@ -20,10 +20,10 @@ metadata: labels: app: vald-index-operator app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: index-operator spec: progressDeadlineSeconds: 600 @@ -46,7 +46,7 @@ spec: app.kubernetes.io/instance: release-name app.kubernetes.io/component: operator annotations: - checksum/configmap: c9c0a97792fa0594fb6ae3946f4d7294a9e2fbc782b48b82272db25754ddc5ff + checksum/configmap: d01561a89777833e0468cc6a162172b5838979c9e91114d0b3517e5a6c115798 pyroscope.io/scrape: "true" pyroscope.io/application-name: vald-index-operator pyroscope.io/profile-cpu-enabled: "true" diff --git a/k8s/index/operator/priorityclass.yaml b/k8s/index/operator/priorityclass.yaml index 4a7effaf61..1178908879 100644 --- a/k8s/index/operator/priorityclass.yaml +++ b/k8s/index/operator/priorityclass.yaml @@ -19,10 +19,10 @@ metadata: name: default-vald-index-operator-priority labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: index-operator value: 1e+06 preemptionPolicy: Never diff --git a/k8s/manager/index/configmap.yaml b/k8s/manager/index/configmap.yaml index 0b6d03a1c6..1d212617ac 100644 --- a/k8s/manager/index/configmap.yaml +++ b/k8s/manager/index/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-manager-index-config labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: manager-index data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 
0 mode: GRPC network: tcp @@ -264,16 +269,20 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 wait_for_ready: true dial_option: + authority: "" backoff_base_delay: 1s backoff_jitter: 0.2 backoff_max_delay: 120s backoff_multiplier: 1.6 + disable_retry: false enable_backoff: false + idle_timeout: 1h initial_connection_window_size: 2097152 initial_window_size: 1048576 insecure: true @@ -282,6 +291,8 @@ data: permit_without_stream: false time: "" timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 max_msg_size: 0 min_connection_timeout: 20s net: @@ -293,6 +304,7 @@ data: cache_enabled: true cache_expiration: 1h refresh_duration: 30m + network: tcp socket_option: ip_recover_destination_addr: false ip_transparent: false @@ -310,7 +322,9 @@ data: insecure_skip_verify: false key: /path/to/key read_buffer_size: 0 + shared_write_buffer: false timeout: "" + user_agent: Vald-gRPC write_buffer_size: 0 tls: ca: /path/to/ca @@ -342,6 +356,7 @@ data: min_samples: 1000 open_timeout: 1s call_option: + content_subtype: "" max_recv_msg_size: 0 max_retry_rpc_buffer_size: 0 max_send_msg_size: 0 diff --git a/k8s/manager/index/deployment.yaml b/k8s/manager/index/deployment.yaml index c17b7b5ac2..51faa5de42 100644 --- a/k8s/manager/index/deployment.yaml +++ b/k8s/manager/index/deployment.yaml @@ -20,10 +20,10 @@ metadata: labels: app: vald-manager-index app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: manager-index spec: progressDeadlineSeconds: 600 @@ -46,7 +46,7 @@ spec: app.kubernetes.io/instance: release-name app.kubernetes.io/component: manager-index annotations: - checksum/configmap: 876907cfbfbcab80cd72c01874d8651958d9dfe9e5a8e3474ecb3afd2e62dbda + checksum/configmap: 8aebb249a588793313c046dff60dc9a72a7fe00d3da7cf0575f2d35a2b788158 profefe.com/enable: "true" profefe.com/port: "6060" profefe.com/service: vald-manager-index @@ -59,6 +59,7 @@ spec: initContainers: - name: wait-for-agent image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e @@ -70,6 +71,7 @@ spec: done - name: wait-for-discoverer image: busybox:stable + imagePullPolicy: Always command: - /bin/sh - -e diff --git a/k8s/manager/index/pdb.yaml b/k8s/manager/index/pdb.yaml index 54e980cfc4..00d115388b 100644 --- a/k8s/manager/index/pdb.yaml +++ b/k8s/manager/index/pdb.yaml @@ -19,13 +19,14 @@ metadata: name: vald-manager-index labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: manager-index spec: maxUnavailable: 50% selector: matchLabels: app: vald-manager-index + unhealthyPodEvictionPolicy: AlwaysAllow diff --git a/k8s/manager/index/priorityclass.yaml b/k8s/manager/index/priorityclass.yaml index 8efe09044a..a9807ab633 100644 --- a/k8s/manager/index/priorityclass.yaml +++ b/k8s/manager/index/priorityclass.yaml @@ -19,10 +19,10 @@ metadata: name: default-vald-manager-index-priority labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 
+ app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: manager-index value: 1e+06 globalDefault: false diff --git a/k8s/manager/index/svc.yaml b/k8s/manager/index/svc.yaml index 0fb5e62aad..bc03aac73f 100644 --- a/k8s/manager/index/svc.yaml +++ b/k8s/manager/index/svc.yaml @@ -19,10 +19,10 @@ metadata: name: vald-manager-index labels: app.kubernetes.io/name: vald - helm.sh/chart: vald-v1.7.13 + helm.sh/chart: vald-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: manager-index spec: ports: diff --git a/k8s/metrics/loki/loki.yaml b/k8s/metrics/loki/loki.yaml index 2ea59e255d..10d2abe1a8 100644 --- a/k8s/metrics/loki/loki.yaml +++ b/k8s/metrics/loki/loki.yaml @@ -31,7 +31,7 @@ spec: - args: - -config.file=/etc/loki/loki.yaml image: grafana/loki:2.0.0 - imagePullPolicy: IfNotPresent + imagePullPolicy: Always livenessProbe: httpGet: path: /ready diff --git a/k8s/metrics/loki/promtail.yaml b/k8s/metrics/loki/promtail.yaml index f650c07614..c5687462b0 100644 --- a/k8s/metrics/loki/promtail.yaml +++ b/k8s/metrics/loki/promtail.yaml @@ -33,14 +33,14 @@ spec: spec: containers: - args: - - -config.file=/etc/promtail/promtail.yml + - -config.file=/etc/promtail/promtail.yaml env: - name: HOSTNAME valueFrom: fieldRef: fieldPath: spec.nodeName image: grafana/promtail:1.5.0 - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: promtail ports: - containerPort: 80 @@ -120,7 +120,7 @@ kind: ConfigMap metadata: name: promtail data: - promtail.yml: | + promtail.yaml: | clients: - external_labels: cluster: vald diff --git a/k8s/metrics/profefe/cronjob.yaml b/k8s/metrics/profefe/cronjob.yaml index e9491ce4d9..d02d1d784e 100644 --- a/k8s/metrics/profefe/cronjob.yaml +++ b/k8s/metrics/profefe/cronjob.yaml @@ -30,7 +30,7 @@ spec: - --profefe-hostport - http://profefe:10100 image: profefe/kprofefe:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: kprofefe restartPolicy: Never serviceAccount: kprofefe diff --git a/k8s/metrics/pyroscope/base/deployment.yaml b/k8s/metrics/pyroscope/base/deployment.yaml index 609f13bd52..3632aa349a 100644 --- a/k8s/metrics/pyroscope/base/deployment.yaml +++ b/k8s/metrics/pyroscope/base/deployment.yaml @@ -48,7 +48,7 @@ spec: - "/tmp/config.yaml" securityContext: {} image: "pyroscope/pyroscope:latest" - imagePullPolicy: IfNotPresent + imagePullPolicy: Always ports: - name: api containerPort: 4040 diff --git a/k8s/metrics/tempo/tempo.yaml b/k8s/metrics/tempo/tempo.yaml index c34dfab909..2bedaad825 100644 --- a/k8s/metrics/tempo/tempo.yaml +++ b/k8s/metrics/tempo/tempo.yaml @@ -73,7 +73,7 @@ spec: - name: JAEGER_AGENT_PORT value: "" image: grafana/tempo:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: tempo ports: - containerPort: 3100 @@ -91,7 +91,7 @@ spec: - name: JAEGER_AGENT_PORT value: "" image: grafana/tempo-query:latest - imagePullPolicy: IfNotPresent + imagePullPolicy: Always name: tempo-query ports: - containerPort: 16686 diff --git a/k8s/operator/helm/crds/valdhelmoperatorrelease.yaml b/k8s/operator/helm/crds/valdhelmoperatorrelease.yaml index f94a4eeaa9..f4ecaef4cf 100644 --- a/k8s/operator/helm/crds/valdhelmoperatorrelease.yaml +++ b/k8s/operator/helm/crds/valdhelmoperatorrelease.yaml @@ -62,7 +62,6 @@ spec: x-kubernetes-preserve-unknown-fields: true # schema of spec must be generated by hack/helm/schema/crd/main.go. 
spec: - type: object properties: affinity: type: object @@ -77,29 +76,27 @@ spec: healthPort: type: integer image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object leaderElectionID: type: string livenessProbe: - type: object properties: enabled: type: boolean failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -107,6 +104,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -115,26 +113,27 @@ spec: type: integer timeoutSeconds: type: integer - logging: type: object + logging: properties: format: - type: string enum: - console - json - level: type: string + level: enum: - debug - info - error - stacktraceLevel: type: string + stacktraceLevel: enum: - debug - info - error + type: string + type: object maxConcurrentReconciles: type: integer metricsPort: @@ -153,21 +152,19 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true rbac: - type: object properties: create: type: boolean name: type: string - readinessProbe: type: object + readinessProbe: properties: enabled: type: boolean failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -175,6 +172,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -183,6 +181,7 @@ spec: type: integer timeoutSeconds: type: integer + type: object reconcilePeriod: type: string replicas: @@ -194,7 +193,6 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true service: - type: object properties: annotations: type: object @@ -207,22 +205,24 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort - serviceAccount: + type: string type: object + serviceAccount: properties: create: type: boolean name: type: string + type: object tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array watchNamespaces: type: string + type: object diff --git a/k8s/operator/helm/crds/valdrelease.yaml b/k8s/operator/helm/crds/valdrelease.yaml index 07a12af69c..58b6811818 100644 --- a/k8s/operator/helm/crds/valdrelease.yaml +++ b/k8s/operator/helm/crds/valdrelease.yaml @@ -66,89 +66,86 @@ spec: x-kubernetes-preserve-unknown-fields: true # schema of spec must be generated by hack/helm/schema/crd/main.go. 
spec: - type: object properties: agent: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object algorithm: - type: string enum: - ngt - faiss + type: string annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string faiss: - type: object properties: auto_index_check_duration: type: string @@ -159,8 +156,8 @@ spec: auto_save_index_duration: type: string dimension: - type: integer minimum: 1 + type: integer enable_copy_on_write: type: boolean enable_in_memory_mode: @@ -172,10 +169,10 @@ spec: initial_delay_max_duration: type: string kvsdb: - type: object properties: concurrency: type: integer + type: object load_index_timeout_factor: type: string m: @@ -183,15 +180,15 @@ spec: max_load_index_timeout: type: string method_type: - type: string enum: - ivfpq - binaryindex - metric_type: type: string + metric_type: enum: - innerproduct - l2 + type: string min_load_index_timeout: type: string namespace: @@ -203,76 +200,76 @@ spec: pod_name: type: string vqueue: - type: object properties: delete_buffer_pool_size: type: integer insert_buffer_pool_size: type: integer - hpa: + type: object type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - StatefulSet - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string ngt: - type: object properties: 
auto_create_index_pool_size: type: integer @@ -285,8 +282,8 @@ spec: auto_save_index_duration: type: string broken_index_history_limit: - type: integer minimum: 0 + type: integer bulk_insert_chunk_size: type: integer creation_edge_size: @@ -298,10 +295,9 @@ spec: default_radius: type: number dimension: - type: integer minimum: 1 + type: integer distance_type: - type: string enum: - l1 - l2 @@ -329,6 +325,7 @@ spec: - innerproduct - dp - ip + type: string enable_copy_on_write: type: boolean enable_export_index_info_to_k8s: @@ -340,8 +337,8 @@ spec: enable_statistics: type: boolean error_buffer_limit: - type: integer minimum: 1 + type: integer export_index_info_duration: type: string index_path: @@ -349,10 +346,10 @@ spec: initial_delay_max_duration: type: string kvsdb: - type: object properties: concurrency: type: integer + type: object load_index_timeout_factor: type: string max_load_index_timeout: @@ -362,34 +359,33 @@ spec: namespace: type: string object_type: - type: string enum: - float - float16 - uint8 + type: string pod_name: type: string search_edge_size: type: integer vqueue: - type: object properties: delete_buffer_pool_size: type: integer insert_buffer_pool_size: type: integer + type: object + type: object nodeName: type: string nodeSelector: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -400,9 +396,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -414,11 +408,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -428,6 +423,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -442,13 +438,14 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - persistentVolume: + type: object type: object + persistentVolume: properties: accessMode: type: string @@ -460,62 +457,62 @@ spec: type: string storageClass: type: string + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podManagementPolicy: - type: string enum: - OrderedReady - Parallel + type: string podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer readreplica: - type: object properties: component_name: type: string enabled: type: boolean hpa: - type: object properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer + type: object label_key: type: string maxReplicas: - type: integer minimum: 1 - minReplicas: type: integer + minReplicas: minimum: 1 + type: integer name: type: string service: - type: object properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true + type: object snapshot_classname: type: string volume_name: type: string - resources: type: object + resources: properties: limits: type: object @@ -523,11 +520,11 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string @@ -535,31 +532,27 @@ spec: type: 
string partition: type: integer + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -567,6 +560,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -575,20 +569,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -608,6 +600,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -618,10 +611,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -632,12 +625,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -657,30 +650,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -688,6 +681,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -696,16 +690,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -725,6 +717,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -735,10 +728,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -749,12 +742,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -774,28 +767,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -803,6 +796,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -811,30 +805,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - 
type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -854,6 +846,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -864,10 +857,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -878,12 +871,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -903,31 +896,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -935,6 +928,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -944,16 +939,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -969,20 +963,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -993,12 +996,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1018,33 +1021,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1064,6 +1066,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1074,10 +1077,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1088,12 +1091,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1113,14 +1116,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: 
integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -1132,8 +1138,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -1141,44 +1148,41 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string sidecar: - type: object properties: config: - type: object properties: auto_backup_duration: type: string auto_backup_enabled: type: boolean blob_storage: - type: object properties: bucket: type: string cloud_storage: - type: object properties: client: - type: object properties: credentials_file_path: type: string credentials_json: type: string + type: object url: type: string write_buffer_size: @@ -1193,8 +1197,8 @@ spec: type: string write_content_type: type: string - s3: type: object + s3: properties: access_key: type: string @@ -1215,11 +1219,11 @@ spec: force_path_style: type: boolean max_chunk_size: - type: string pattern: ^[0-9]+(kb|mb|gb)$ - max_part_size: type: string + max_part_size: pattern: ^[0-9]+(kb|mb|gb)$ + type: string max_retries: type: integer region: @@ -1234,19 +1238,18 @@ spec: type: boolean use_dual_stack: type: boolean + type: object storage_type: - type: string enum: - s3 - cloud_storage - client: + type: string type: object + client: properties: net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -1254,8 +1257,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -1263,8 +1266,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -1284,8 +1293,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -1297,11 +1306,11 @@ spec: type: boolean key: type: string - transport: + type: object type: object + transport: properties: backoff: - type: object properties: backoff_factor: type: number @@ -1317,8 +1326,8 @@ spec: type: string retry_count: type: integer - round_tripper: type: object + round_tripper: properties: expect_continue_timeout: type: string @@ -1342,18 +1351,21 @@ spec: type: string write_buffer_size: type: integer - compress: + type: object + type: object type: object + compress: properties: compress_algorithm: - type: string enum: - gob - gzip - lz4 - zstd + type: string compression_level: type: integer + type: object filename: type: string filename_suffix: @@ -1361,7 +1373,6 @@ spec: post_stop_timeout: type: string restore_backoff: - type: object properties: backoff_factor: type: number @@ -1377,62 +1388,62 @@ spec: type: string retry_count: type: integer + type: object restore_backoff_enabled: type: boolean watch_enabled: type: boolean + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainerEnabled: type: boolean logging: - type: object properties: format: - type: string enum: - raw - json - level: type: 
string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -1443,9 +1454,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -1457,11 +1466,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -1471,6 +1481,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -1485,13 +1496,14 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - resources: + type: object type: object + resources: properties: limits: type: object @@ -1499,28 +1511,24 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true - server_config: type: object + server_config: properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1528,6 +1536,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1536,20 +1545,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1569,6 +1576,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1579,10 +1587,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1593,12 +1601,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1618,30 +1626,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1649,6 +1657,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1657,16 +1666,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1686,6 +1693,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1696,10 +1704,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string 
enum: - tcp - tcp4 @@ -1710,12 +1718,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1735,28 +1743,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -1764,6 +1772,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -1772,30 +1781,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -1815,6 +1822,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -1825,10 +1833,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1839,12 +1847,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1864,31 +1872,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -1896,6 +1904,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -1905,16 +1915,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -1930,20 +1939,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -1954,12 +1972,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -1979,33 +1997,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object 
socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2025,6 +2042,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2035,10 +2053,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2049,12 +2067,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2074,14 +2092,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -2093,8 +2114,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -2107,59 +2129,63 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string + type: object time_zone: type: string version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - defaults: + type: array type: object + defaults: properties: grpc: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -2175,11 +2201,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -2191,8 +2217,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -2204,9 +2230,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -2215,8 +2245,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -2224,13 +2258,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - 
TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -2238,15 +2272,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -2254,8 +2291,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -2263,8 +2300,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -2284,8 +2327,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -2297,12 +2340,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -2312,7 +2362,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -2324,59 +2373,60 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: + type: object type: object + image: properties: tag: type: string - logging: type: object + logging: properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap - networkPolicy: + type: string type: object + networkPolicy: properties: custom: - type: object properties: egress: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - ingress: type: array + ingress: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object enabled: type: boolean - observability: type: object + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -2387,9 +2437,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -2401,11 +2449,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -2415,6 +2464,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -2429,33 +2479,30 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - server_config: + type: object type: object + server_config: properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2463,6 +2510,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2471,20 +2519,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: 
integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2504,6 +2550,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2514,10 +2561,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2528,12 +2575,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2553,30 +2600,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2584,6 +2631,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2592,16 +2640,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2621,6 +2667,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2631,10 +2678,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2645,12 +2692,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2670,28 +2717,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -2699,6 +2746,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -2707,30 +2755,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2750,6 +2796,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2760,10 +2807,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2774,12 +2821,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: 
ip_recover_destination_addr: type: boolean @@ -2799,31 +2846,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -2831,6 +2878,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -2840,16 +2889,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -2865,20 +2913,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2889,12 +2946,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -2914,33 +2971,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -2960,6 +3016,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -2970,10 +3027,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -2984,12 +3041,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3009,14 +3066,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -3028,75 +3088,76 @@ spec: type: boolean key: type: string + type: object + type: object time_zone: type: string - discoverer: type: object + discoverer: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object 
x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string - discoverer: type: object + discoverer: properties: discovery_duration: type: string @@ -3105,10 +3166,8 @@ spec: namespace: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -3116,8 +3175,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -3125,8 +3184,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -3146,8 +3211,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -3159,11 +3224,11 @@ spec: type: boolean key: type: string - selectors: + type: object type: object + selectors: properties: node: - type: object properties: fields: type: object @@ -3171,8 +3236,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - node_metrics: type: object + node_metrics: properties: fields: type: object @@ -3180,8 +3245,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - pod: type: object + pod: properties: fields: type: object @@ -3189,8 +3254,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - pod_metrics: type: object + pod_metrics: properties: fields: type: object @@ -3198,8 +3263,8 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - service: type: object + service: properties: fields: type: object @@ -3207,76 +3272,79 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object + type: object + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string hpa: - type: object properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: 
string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -3285,12 +3353,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -3301,9 +3367,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -3315,11 +3379,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -3329,6 +3394,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -3343,28 +3409,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -3372,41 +3439,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3414,6 +3477,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3422,20 +3486,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3455,6 +3517,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3465,10 +3528,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3479,12 +3542,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3504,30 +3567,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer 
readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3535,6 +3598,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3543,16 +3607,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3572,6 +3634,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3582,10 +3645,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3596,12 +3659,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3621,28 +3684,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -3650,6 +3713,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -3658,30 +3722,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3701,6 +3763,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3711,10 +3774,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3725,12 +3788,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3750,31 +3813,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -3782,6 +3845,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -3791,16 +3856,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -3816,20 +3880,29 @@ 
spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3840,12 +3913,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3865,33 +3938,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -3911,6 +3983,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -3921,10 +3994,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -3935,12 +4008,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -3960,14 +4033,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -3979,8 +4055,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -3988,125 +4065,126 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - gateway: + type: array type: object + gateway: properties: filter: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object 
x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: egress_filter: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4122,11 +4200,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4138,8 +4216,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4151,9 +4229,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4162,8 +4244,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4171,13 +4257,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4185,15 +4271,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4201,8 +4290,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4210,8 +4299,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4231,8 +4326,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4244,12 +4339,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string 
max_recv_msg_size: @@ -4259,7 +4361,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4271,25 +4372,26 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object distance_filters: - type: array items: type: string - object_filters: type: array + object_filters: items: type: string - gateway_client: + type: array type: object + gateway_client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4305,11 +4407,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4321,8 +4423,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4334,9 +4436,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4345,8 +4451,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4354,13 +4464,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4368,15 +4478,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4384,8 +4497,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4393,8 +4506,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4414,8 +4533,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4427,12 +4546,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -4442,7 +4568,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4454,20 +4579,19 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - ingress_filter: type: object + ingress_filter: properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -4483,11 +4607,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -4499,8 +4623,8 @@ spec: type: integer open_timeout: type: string - 
connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -4512,9 +4636,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -4523,8 +4651,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -4532,13 +4664,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -4546,15 +4678,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -4562,8 +4697,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -4571,8 +4706,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -4592,8 +4733,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -4605,12 +4746,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -4620,7 +4768,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -4632,57 +4779,60 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object insert_filters: - type: array items: type: string - search_filters: type: array + search_filters: items: type: string - update_filters: type: array + update_filters: items: type: string - upsert_filters: type: array + upsert_filters: items: type: string + type: array vectorizer: type: string - hpa: + type: object type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -4691,47 +4841,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 
+ type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -4740,12 +4891,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -4756,9 +4905,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -4770,11 +4917,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -4784,6 +4932,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -4798,28 +4947,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -4827,41 +4977,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -4869,6 +5015,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -4877,20 +5024,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -4910,6 +5055,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -4920,10 +5066,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -4934,12 +5080,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -4959,30 +5105,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -4990,6 +5136,7 @@ spec: type: string scheme: type: string 
+ type: object initialDelaySeconds: type: integer periodSeconds: @@ -4998,16 +5145,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5027,6 +5172,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5037,10 +5183,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5051,12 +5197,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5076,28 +5222,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -5105,6 +5251,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -5113,30 +5260,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5156,6 +5301,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5166,10 +5312,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5180,12 +5326,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5205,31 +5351,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -5237,6 +5383,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -5246,16 +5394,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -5271,20 +5418,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer 
max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5295,12 +5451,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5320,33 +5476,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -5366,6 +5521,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -5376,10 +5532,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -5390,12 +5546,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -5415,14 +5571,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -5434,8 +5593,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -5443,117 +5603,119 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - lb: + type: array type: object + lb: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + 
requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: agent_namespace: type: string discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5569,11 +5731,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5585,8 +5747,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5598,9 +5760,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5609,8 +5775,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -5618,13 +5788,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5632,15 +5802,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -5648,8 +5821,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -5657,8 +5830,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -5678,8 +5857,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -5691,12 +5870,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -5706,7 +5892,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -5718,17 +5903,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: 
- type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5744,11 +5929,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5760,8 +5945,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5773,9 +5958,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5784,8 +5973,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -5793,13 +5986,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5807,15 +6000,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -5823,8 +6019,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -5832,8 +6028,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -5853,8 +6055,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -5866,12 +6068,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -5881,7 +6090,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -5893,19 +6101,19 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string read_client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -5921,11 +6129,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -5937,8 +6145,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -5950,9 +6158,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -5961,8 +6173,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: 
+ type: string initial_connection_window_size: type: integer initial_window_size: @@ -5970,13 +6186,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -5984,15 +6200,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -6000,8 +6219,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -6009,8 +6228,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -6030,8 +6255,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -6043,12 +6268,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -6058,7 +6290,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -6070,47 +6301,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object + type: object index_replica: - type: integer minimum: 1 - multi_operation_concurrency: type: integer + multi_operation_concurrency: minimum: 2 + type: integer node_name: type: string - hpa: type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -6119,47 +6353,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -6168,12 +6403,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -6184,9 +6417,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -6198,11 +6429,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: 
string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -6212,6 +6444,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -6226,28 +6459,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -6255,41 +6489,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6297,6 +6527,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6305,20 +6536,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6338,6 +6567,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6348,10 +6578,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6362,12 +6592,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6387,30 +6617,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6418,6 +6648,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6426,16 +6657,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6455,6 +6684,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6465,10 +6695,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp 
- tcp4 @@ -6479,12 +6709,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6504,28 +6734,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -6533,6 +6763,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -6541,30 +6772,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6584,6 +6813,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6594,10 +6824,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6608,12 +6838,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6633,31 +6863,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -6665,6 +6895,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -6674,16 +6906,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -6699,20 +6930,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6723,12 +6963,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6748,33 +6988,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: 
string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -6794,6 +7033,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -6804,10 +7044,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -6818,12 +7058,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -6843,14 +7083,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -6862,8 +7105,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -6871,126 +7115,129 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - mirror: + type: array type: object + mirror: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true clusterRole: - type: 
object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string gateway_config: - type: object properties: client: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -7006,11 +7253,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -7022,8 +7269,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -7035,9 +7282,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -7046,8 +7297,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -7055,13 +7310,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -7069,15 +7324,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -7085,8 +7343,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -7094,8 +7352,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -7115,8 +7379,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -7128,12 +7392,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -7143,7 +7414,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -7155,8 +7425,10 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object colocation: type: string discovery_duration: @@ -7168,10 +7440,8 @@ spec: namespace: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -7179,8 +7449,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -7188,8 +7458,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: 
ip_recover_destination_addr: type: boolean @@ -7209,8 +7485,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -7222,43 +7498,45 @@ spec: type: boolean key: type: string + type: object + type: object pod_name: type: string register_duration: type: string self_mirror_addr: type: string - hpa: type: object + hpa: properties: enabled: type: boolean targetCPUUtilizationPercentage: type: integer - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - ingress: type: object + ingress: properties: annotations: type: object x-kubernetes-preserve-unknown-fields: true defaultBackend: - type: object properties: enabled: type: boolean + type: object enabled: type: boolean host: @@ -7267,47 +7545,48 @@ spec: type: string servicePort: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array internalTrafficPolicy: type: string kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxReplicas: - type: integer minimum: 0 + type: integer maxUnavailable: type: string minReplicas: - type: integer minimum: 0 + type: integer name: type: string nodeName: @@ -7316,12 +7595,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -7332,9 +7609,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -7346,11 +7621,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -7360,6 +7636,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -7374,28 +7651,29 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer resources: - type: object properties: limits: type: object @@ -7403,41 +7681,37 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7445,6 +7719,7 @@ spec: type: string scheme: type: string + type: object 
initialDelaySeconds: type: integer periodSeconds: @@ -7453,20 +7728,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7486,6 +7759,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7496,10 +7770,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7510,12 +7784,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7535,30 +7809,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7566,6 +7840,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -7574,16 +7849,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7603,6 +7876,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7613,10 +7887,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7627,12 +7901,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7652,28 +7926,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -7681,6 +7955,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -7689,30 +7964,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7732,6 +8005,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7742,10 +8016,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string 
enum: - tcp - tcp4 @@ -7756,12 +8030,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7781,31 +8055,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -7813,6 +8087,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -7822,16 +8098,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -7847,20 +8122,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7871,12 +8155,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7896,33 +8180,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -7942,6 +8225,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -7952,10 +8236,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -7966,12 +8250,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -7991,14 +8275,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -8010,8 +8297,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -8019,118 +8307,165 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true - serviceAccount: type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: 
object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true - manager: + type: array + type: object type: object + manager: properties: index: - type: object properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true corrector: - type: object properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8146,11 +8481,11 @@ spec: type: string retry_count: type: integer + type: object call_option: 
type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8162,8 +8497,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8175,9 +8510,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8186,8 +8525,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8195,13 +8538,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8209,15 +8552,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8225,8 +8571,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8234,8 +8580,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -8255,8 +8607,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8268,12 +8620,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8283,7 +8642,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8295,17 +8653,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8321,11 +8679,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8337,8 +8695,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8350,9 +8708,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8361,8 +8723,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8370,13 +8736,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: 
string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8384,15 +8750,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8400,8 +8769,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8409,8 +8778,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -8430,8 +8805,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8443,12 +8818,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8458,7 +8840,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8470,26 +8851,27 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array gateway: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -8505,11 +8887,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -8521,8 +8903,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -8534,9 +8916,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -8545,8 +8931,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -8554,13 +8944,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -8568,15 +8958,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -8584,8 +8977,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -8593,8 +8986,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: 
ip_recover_destination_addr: type: boolean @@ -8614,8 +9013,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -8627,12 +9026,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -8642,7 +9048,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -8654,26 +9059,28 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kvs_background_compaction_interval: type: string kvs_background_sync_interval: @@ -8682,13 +9089,14 @@ spec: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -8699,9 +9107,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -8713,11 +9119,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -8727,6 +9134,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -8741,35 +9149,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -8777,6 +9182,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -8785,20 +9191,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -8818,6 +9222,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -8828,10 +9233,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -8842,12 +9247,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -8867,30 +9272,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + 
readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -8898,6 +9303,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -8906,16 +9312,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -8935,6 +9339,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -8945,10 +9350,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -8959,12 +9364,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -8984,28 +9389,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9013,6 +9418,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9021,30 +9427,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9064,6 +9468,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9074,10 +9479,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9088,12 +9493,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9113,31 +9518,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -9145,6 +9550,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -9154,16 +9561,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - 
MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -9179,20 +9585,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9203,12 +9618,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9228,33 +9643,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9274,6 +9688,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9284,10 +9699,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9298,12 +9713,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9323,14 +9738,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -9342,40 +9760,89 @@ spec: type: boolean key: type: string + type: object + type: object startingDeadlineSeconds: type: integer stream_list_concurrency: - type: integer minimum: 1 + type: integer suspend: type: boolean + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ - creator: + type: string type: object + creator: properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + 
x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string concurrency: - type: integer minimum: 1 + type: integer creation_pool_size: type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -9391,11 +9858,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -9407,8 +9874,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -9420,9 +9887,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -9431,8 +9902,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -9440,13 +9915,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -9454,15 +9929,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -9470,8 +9948,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -9479,8 +9957,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -9500,8 +9984,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -9513,12 +9997,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -9528,7 +10019,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -9540,17 +10030,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -9566,11 +10056,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -9582,8 +10072,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -9595,9 +10085,13 @@ spec: type: string size: type: integer - dial_option: type: object + 
content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -9606,8 +10100,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -9615,13 +10113,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -9629,15 +10127,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -9645,8 +10146,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -9654,8 +10155,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -9675,8 +10182,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -9688,12 +10195,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -9703,7 +10217,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -9715,46 +10228,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -9765,9 +10282,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -9779,11 +10294,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -9793,6 +10309,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -9807,35 +10324,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: 
enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9843,6 +10357,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9851,20 +10366,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -9884,6 +10397,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -9894,10 +10408,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -9908,12 +10422,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -9933,30 +10447,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -9964,6 +10478,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -9972,16 +10487,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10001,6 +10514,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10011,10 +10525,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10025,12 +10539,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10050,28 +10564,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -10079,6 +10593,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -10087,30 +10602,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10130,6 
+10643,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10140,10 +10654,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10154,12 +10668,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10179,31 +10693,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -10211,6 +10725,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -10220,16 +10736,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -10245,20 +10760,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10269,12 +10793,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10294,33 +10818,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -10340,6 +10863,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -10350,10 +10874,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -10364,12 +10888,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -10389,14 +10913,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -10408,43 +10935,50 @@ spec: type: boolean key: type: string 
+ type: object + type: object startingDeadlineSeconds: type: integer suspend: type: boolean target_addrs: - type: array items: type: string + type: array + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array externalTrafficPolicy: type: string image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - indexer: type: object + indexer: properties: agent_namespace: type: string @@ -10459,22 +10993,19 @@ spec: auto_save_index_wait_duration: type: string concurrency: - type: integer minimum: 1 + type: integer creation_pool_size: type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -10490,11 +11021,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -10506,8 +11037,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -10519,9 +11050,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -10530,8 +11065,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -10539,13 +11078,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -10553,15 +11092,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -10569,8 +11111,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -10578,8 +11120,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -10599,8 +11147,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -10612,12 +11160,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -10627,7 +11182,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -10639,17 +11193,17 @@ spec: 
type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -10665,11 +11219,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -10681,8 +11235,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -10694,9 +11248,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -10705,8 +11263,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -10714,13 +11276,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -10728,15 +11290,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -10744,8 +11309,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -10753,8 +11318,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -10774,8 +11345,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -10787,12 +11358,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -10802,7 +11380,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -10814,43 +11391,47 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object node_name: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object maxUnavailable: type: string name: @@ -10861,12 +11442,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -10877,9 +11456,7 @@ spec: enable_version_info: type: boolean 
version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -10891,11 +11468,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -10905,6 +11483,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -10919,113 +11498,114 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean - operator: + type: object type: object + operator: properties: affinity: - type: object properties: nodeAffinity: - type: object properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array requiredDuringSchedulingIgnoredDuringExecution: - type: object properties: nodeSelectorTerms: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - podAffinity: + type: array + type: object type: object + podAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true - podAntiAffinity: + type: array type: object + podAntiAffinity: properties: preferredDuringSchedulingIgnoredDuringExecution: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - requiredDuringSchedulingIgnoredDuringExecution: type: array + requiredDuringSchedulingIgnoredDuringExecution: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object annotations: type: object x-kubernetes-preserve-unknown-fields: true enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array kind: - type: string enum: - Deployment - DaemonSet + type: string logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string namespace: @@ -11036,12 +11616,10 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -11052,9 +11630,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -11066,11 +11642,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -11080,6 +11657,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -11094,31 +11672,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: 
object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer replicas: - type: integer minimum: 0 + type: integer resources: - type: object properties: limits: type: object @@ -11126,44 +11705,40 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string + type: object rotation_job_concurrency: - type: integer minimum: 1 + type: integer securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11171,6 +11746,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11179,20 +11755,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11212,6 +11786,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11222,10 +11797,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11236,12 +11811,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11261,30 +11836,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11292,6 +11867,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11300,16 +11876,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11329,6 +11903,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11339,10 +11914,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11353,12 +11928,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11378,28 +11953,28 @@ spec: type: 
boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11407,6 +11982,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11415,30 +11991,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11458,6 +12032,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11468,10 +12043,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11482,12 +12057,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11507,31 +12082,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -11539,6 +12114,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -11548,16 +12125,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -11573,20 +12149,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11597,12 +12182,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11622,33 +12207,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: 
integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11668,6 +12252,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11678,10 +12263,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11692,12 +12277,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -11717,14 +12302,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -11736,103 +12324,102 @@ spec: type: boolean key: type: string + type: object + type: object terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object x-kubernetes-preserve-unknown-fields: true + type: array + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true podPriority: - type: object properties: enabled: type: boolean value: type: integer + type: object podSecurityContext: type: object x-kubernetes-preserve-unknown-fields: true progressDeadlineSeconds: type: integer readreplica: - type: object properties: rotator: - type: object properties: agent_namespace: type: string clusterRole: - type: object properties: enabled: type: boolean name: type: string - clusterRoleBinding: type: object + clusterRoleBinding: properties: enabled: type: boolean name: type: string + type: object env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string observability: - type: object properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -11843,9 +12430,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -11857,11 +12442,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -11871,6 +12457,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -11885,11 +12472,13 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object podSecurityContext: type: object 
x-kubernetes-preserve-unknown-fields: true @@ -11897,27 +12486,22 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -11925,6 +12509,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -11933,20 +12518,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -11966,6 +12549,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -11976,10 +12560,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -11990,12 +12574,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12015,30 +12599,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -12046,6 +12630,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -12054,16 +12639,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12083,6 +12666,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12093,10 +12677,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12107,12 +12691,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12132,28 +12716,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -12161,6 +12745,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -12169,30 +12754,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: 
object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12212,6 +12795,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12222,10 +12806,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12236,12 +12820,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12261,31 +12845,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -12293,6 +12877,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -12302,16 +12888,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -12327,20 +12912,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12351,12 +12945,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12376,33 +12970,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -12422,6 +13015,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -12432,10 +13026,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -12446,12 +13040,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -12471,14 +13065,17 @@ spec: type: 
boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -12490,25 +13087,28 @@ spec: type: boolean key: type: string - serviceAccount: + type: object type: object + serviceAccount: properties: enabled: type: boolean name: type: string + type: object target_read_replica_id_annotations_key: type: string ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object + type: object replicas: - type: integer minimum: 0 + type: integer resources: - type: object properties: limits: type: object @@ -12516,36 +13116,78 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object revisionHistoryLimit: - type: integer minimum: 0 + type: integer rollingUpdate: - type: object properties: maxSurge: type: string maxUnavailable: type: string - saver: type: object + saver: properties: + affinity: + properties: + nodeAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + properties: + nodeSelectorTerms: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + podAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + podAntiAffinity: + properties: + preferredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + requiredDuringSchedulingIgnoredDuringExecution: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object agent_namespace: type: string concurrency: - type: integer minimum: 1 + type: integer discoverer: - type: object properties: agent_client_options: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -12561,11 +13203,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -12577,8 +13219,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -12590,9 +13232,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -12601,8 +13247,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -12610,13 +13260,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -12624,15 +13274,18 @@ spec: type: string 
timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -12640,8 +13293,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -12649,8 +13302,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -12670,8 +13329,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -12683,12 +13342,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -12698,7 +13364,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -12710,17 +13375,17 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - client: type: object + client: properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -12736,11 +13401,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -12752,8 +13417,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -12765,9 +13430,13 @@ spec: type: string size: type: integer - dial_option: type: object + content_subtype: + type: string + dial_option: properties: + authority: + type: string backoff_base_delay: type: string backoff_jitter: @@ -12776,8 +13445,12 @@ spec: type: string backoff_multiplier: type: number + disable_retry: + type: boolean enable_backoff: type: boolean + idle_timeout: + type: string initial_connection_window_size: type: integer initial_window_size: @@ -12785,13 +13458,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -12799,15 +13472,18 @@ spec: type: string timeout: type: string + type: object + max_call_attempts: + type: integer + max_header_list_size: + type: integer max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -12815,8 +13491,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -12824,8 +13500,14 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + network: + enum: + - tcp + - udp + - unix + type: string + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -12845,8 +13527,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -12858,12 +13540,19 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: 
integer + shared_write_buffer: + type: boolean timeout: type: string + user_agent: + type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -12873,7 +13562,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -12885,46 +13573,50 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object duration: type: string + type: object enabled: type: boolean env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string + type: object initContainers: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array name: type: string node_name: type: string - observability: + nodeSelector: type: object + x-kubernetes-preserve-unknown-fields: true + observability: properties: enabled: type: boolean metrics: - type: object properties: enable_cgo: type: boolean @@ -12935,9 +13627,7 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: - type: string enum: - vald_version - server_name @@ -12949,11 +13639,12 @@ spec: - cgo_enabled - algorithm_info - build_cpu_info_flags - otlp: + type: string + type: array type: object + otlp: properties: attribute: - type: object properties: namespace: type: string @@ -12963,6 +13654,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -12977,35 +13669,32 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean + type: object + type: object schedule: type: string server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13013,6 +13702,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13021,20 +13711,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13054,6 +13742,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13064,10 +13753,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13078,12 +13767,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13103,30 +13792,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: 
integer httpGet: - type: object properties: path: type: string @@ -13134,6 +13823,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13142,16 +13832,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13171,6 +13859,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13181,10 +13870,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13195,12 +13884,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13220,28 +13909,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13249,6 +13938,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13257,30 +13947,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13300,6 +13988,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13310,10 +13999,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13324,12 +14013,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13349,31 +14038,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -13381,6 +14070,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -13390,16 +14081,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -13415,20 +14105,29 @@ spec: type: string timeout: type: 
string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13439,12 +14138,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13464,33 +14163,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13510,6 +14208,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13520,10 +14219,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13534,12 +14233,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13559,14 +14258,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -13578,44 +14280,47 @@ spec: type: boolean key: type: string + type: object + type: object startingDeadlineSeconds: type: integer suspend: type: boolean target_addrs: - type: array items: type: string + type: array + tolerations: + items: + type: object + x-kubernetes-preserve-unknown-fields: true + type: array ttlSecondsAfterFinished: type: integer version: - type: string pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13623,6 +14328,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13631,20 +14337,18 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13664,6 +14368,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13674,10 +14379,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13688,12 +14393,12 @@ 
spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13713,30 +14418,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13744,6 +14449,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13752,16 +14458,14 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13781,6 +14485,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13791,10 +14496,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13805,12 +14510,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13830,28 +14535,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -13859,6 +14564,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -13867,30 +14573,28 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -13910,6 +14614,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -13920,10 +14625,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -13934,12 +14639,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -13959,31 +14664,31 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - servers: + type: integer + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: 
object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -13991,6 +14696,8 @@ spec: type: string enable_admin: type: boolean + enable_channelz: + type: boolean enable_reflection: type: boolean header_table_size: @@ -14000,16 +14707,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -14025,20 +14731,29 @@ spec: type: string timeout: type: string + type: object + max_concurrent_streams: + type: integer max_header_list_size: type: integer max_receive_message_size: type: integer max_send_message_size: type: integer + num_stream_workers: + type: integer read_buffer_size: type: integer + shared_write_buffer: + type: boolean + wait_for_handlers: + type: boolean write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -14049,12 +14764,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -14074,33 +14789,32 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string http2: - type: object properties: enabled: type: boolean @@ -14120,6 +14834,7 @@ spec: type: integer permit_prohibited_cipher_suites: type: boolean + type: object idle_timeout: type: string read_header_timeout: @@ -14130,10 +14845,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -14144,12 +14859,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -14169,14 +14884,17 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - tls: + type: integer + type: object type: object + tls: properties: ca: type: string @@ -14188,8 +14906,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -14197,37 +14916,46 @@ spec: labels: type: object x-kubernetes-preserve-unknown-fields: true + type: object serviceType: - type: string enum: - ClusterIP - LoadBalancer - NodePort + type: string terminationGracePeriodSeconds: - type: integer minimum: 0 + type: integer time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - topologySpreadConstraints: type: array + topologySpreadConstraints: items: type: object x-kubernetes-preserve-unknown-fields: true - version: + type: array + unhealthyPodEvictionPolicy: + enum: + - AlwaysAllow + - IfHealthyBudget type: string + version: pattern: ^v[0-9]+\.[0-9]+\.[0-9]$ + type: string volumeMounts: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true - volumes: type: array + volumes: items: type: object 
x-kubernetes-preserve-unknown-fields: true + type: array + type: object + type: object + type: object diff --git a/k8s/operator/helm/operator.yaml b/k8s/operator/helm/operator.yaml index 2f72879c63..2401657586 100644 --- a/k8s/operator/helm/operator.yaml +++ b/k8s/operator/helm/operator.yaml @@ -21,10 +21,10 @@ metadata: labels: app: vald-helm-operator app.kubernetes.io/name: vald-helm-operator - helm.sh/chart: vald-helm-operator-v1.7.13 + helm.sh/chart: vald-helm-operator-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: helm-operator spec: replicas: 2 @@ -42,7 +42,7 @@ spec: serviceAccountName: vald-helm-operator containers: - name: vald-helm-operator - image: "vdaas/vald-helm-operator:v1.7.13" + image: "vdaas/vald-helm-operator:v1.7.16" imagePullPolicy: Always args: - "--leader-elect" diff --git a/k8s/operator/helm/svc.yaml b/k8s/operator/helm/svc.yaml index f37b9d7930..fa11eeabb2 100644 --- a/k8s/operator/helm/svc.yaml +++ b/k8s/operator/helm/svc.yaml @@ -19,10 +19,10 @@ metadata: name: vald-helm-operator labels: app.kubernetes.io/name: vald-helm-operator - helm.sh/chart: vald-helm-operator-v1.7.13 + helm.sh/chart: vald-helm-operator-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: helm-operator spec: ports: diff --git a/k8s/readreplica/configmap.yaml b/k8s/readreplica/configmap.yaml index 890d93aaf6..863949ea60 100644 --- a/k8s/readreplica/configmap.yaml +++ b/k8s/readreplica/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-agent-ngt-readreplica-config labels: app.kubernetes.io/name: vald-readreplica - helm.sh/chart: vald-readreplica-v1.7.13 + helm.sh/chart: vald-readreplica-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: agent data: config.yaml: | @@ -42,6 +42,7 @@ data: bidirectional_stream_concurrency: 20 connection_timeout: "" enable_admin: true + enable_channelz: true enable_reflection: true header_table_size: 0 initial_conn_window_size: 2097152 @@ -56,10 +57,14 @@ data: permit_without_stream: false time: 3h timeout: 60s + max_concurrent_streams: 0 max_header_list_size: 0 max_receive_message_size: 0 max_send_message_size: 0 + num_stream_workers: 0 read_buffer_size: 0 + shared_write_buffer: false + wait_for_handlers: true write_buffer_size: 0 mode: GRPC network: tcp @@ -82,6 +87,16 @@ data: port: 3000 http: handler_timeout: "" + http2: + enabled: false + handler_limit: 0 + max_concurrent_streams: 0 + max_decoder_header_table_size: 4096 + max_encoder_header_table_size: 4096 + max_read_frame_size: 0 + max_upload_buffer_per_connection: 0 + max_upload_buffer_per_stream: 0 + permit_prohibited_cipher_suites: true idle_timeout: "" read_header_timeout: "" read_timeout: "" @@ -107,6 +122,16 @@ data: port: 3001 http: handler_timeout: "" + http2: + enabled: false + handler_limit: 0 + max_concurrent_streams: 0 + max_decoder_header_table_size: 4096 + max_encoder_header_table_size: 4096 + max_read_frame_size: 0 + max_upload_buffer_per_connection: 0 + max_upload_buffer_per_stream: 0 + permit_prohibited_cipher_suites: true idle_timeout: "" read_header_timeout: "" read_timeout: "" diff --git a/k8s/tools/benchmark/operator/configmap.yaml b/k8s/tools/benchmark/operator/configmap.yaml 
index 446e00aa93..4ec4c55c6c 100644 --- a/k8s/tools/benchmark/operator/configmap.yaml +++ b/k8s/tools/benchmark/operator/configmap.yaml @@ -19,10 +19,10 @@ metadata: name: vald-benchmark-operator-config labels: app.kubernetes.io/name: vald-benchmark-operator - helm.sh/chart: vald-benchmark-operator-v1.7.13 + helm.sh/chart: vald-benchmark-operator-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: benchmark-operator data: config.yaml: | @@ -219,4 +219,4 @@ data: image: pullPolicy: Always repository: vdaas/vald-benchmark-job - tag: v1.7.13 + tag: v1.7.16 diff --git a/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml b/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml index 7c0037a53b..2bc9d474d9 100644 --- a/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml +++ b/k8s/tools/benchmark/operator/crds/valdbenchmarkjob.yaml @@ -64,17 +64,14 @@ spec: default: Available type: string spec: - type: object properties: client_config: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -90,11 +87,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -106,8 +103,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -119,8 +116,8 @@ spec: type: string size: type: integer - dial_option: type: object + dial_option: properties: backoff_base_delay: type: string @@ -139,13 +136,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -153,15 +150,14 @@ spec: type: string timeout: type: string + type: object max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -169,8 +165,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -178,8 +174,8 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -199,8 +195,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -212,12 +208,15 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer timeout: type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -227,7 +226,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -239,38 +237,39 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean + type: object concurrency_limit: - type: integer maximum: 65535 minimum: 0 + type: integer dataset: - type: object properties: group: - type: string minLength: 1 + type: string indexes: - type: integer minimum: 0 + type: integer name: - type: string enum: - original - fashion-mnist + type: string range: - type: object properties: end: - type: integer minimum: 1 - start: type: integer + start: 
minimum: 1 + type: integer required: - start - end + type: object url: type: string required: @@ -278,43 +277,43 @@ spec: - indexes - group - range - global_config: type: object + global_config: properties: logging: - type: object properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object time_zone: type: string version: type: string - insert_config: type: object + insert_config: properties: skip_strict_exist_check: type: boolean timestamp: type: string + type: object job_type: - type: string enum: - insert - update @@ -323,46 +322,46 @@ spec: - remove - getobject - exists + type: string object_config: - type: object properties: filter_config: - type: object properties: host: type: string - remove_config: + type: object type: object + remove_config: properties: skip_strict_exist_check: type: boolean timestamp: type: string + type: object repetition: - type: integer minimum: 1 - replica: type: integer + replica: minimum: 1 - rps: type: integer + rps: maximum: 65535 minimum: 0 + type: integer rules: - type: array items: type: string + type: array search_config: - type: object properties: aggregation_algorithm: - type: string enum: - Unknown - ConcurrentQueue - SortSlice - SortPoolSlice - PairingHeap + type: string enable_linear_search: type: boolean epsilon: @@ -375,26 +374,22 @@ spec: type: number timeout: type: string - server_config: type: object + server_config: properties: healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -402,6 +397,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -410,15 +406,14 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -432,10 +427,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -446,10 +441,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -469,30 +464,30 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - readiness: + type: integer type: object + readiness: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -500,6 +495,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -508,11 +504,10 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string @@ -526,10 +521,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -540,10 +535,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string 
socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -563,28 +558,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - startup: + type: integer type: object + startup: properties: enabled: type: boolean port: - type: integer maximum: 65535 minimum: 0 + type: integer startupProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -592,6 +587,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -600,25 +596,24 @@ spec: type: integer timeoutSeconds: type: integer - servers: + type: object + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -633,16 +628,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: - type: string enum: - RecoverInterceptor - AccessLogInterceptor - TraceInterceptor - MetricInterceptor + type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -658,6 +652,7 @@ spec: type: string timeout: type: string + type: object max_header_list_size: type: integer max_receive_message_size: @@ -668,10 +663,10 @@ spec: type: integer write_buffer_size: type: integer + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -682,12 +677,12 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string restart: type: boolean socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -707,28 +702,28 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - rest: + type: integer type: object + rest: properties: enabled: type: boolean host: type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -742,10 +737,10 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: - type: string enum: - tcp - tcp4 @@ -756,10 +751,10 @@ spec: - unix - unixgram - unixpacket + type: string probe_wait_time: type: string socket_option: - type: object properties: ip_recover_destination_addr: type: boolean @@ -779,31 +774,35 @@ spec: type: boolean tcp_quick_ack: type: boolean + type: object socket_path: type: string + type: object servicePort: - type: integer maximum: 65535 minimum: 0 - target: + type: integer + type: object + type: object type: object + target: properties: host: - type: string minLength: 1 + type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer required: - host - port + type: object ttl_seconds_after_finished: - type: integer maximum: 65535 minimum: 0 + type: integer update_config: - type: object properties: disable_balance_update: type: boolean @@ -811,8 +810,8 @@ spec: type: boolean timestamp: type: string - upsert_config: type: object + upsert_config: properties: disable_balance_update: type: boolean @@ -820,3 +819,5 @@ spec: type: boolean timestamp: type: string + type: object + type: object diff --git 
a/k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml b/k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml index e6baaa00a8..1ede034eb9 100644 --- a/k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml +++ b/k8s/tools/benchmark/operator/crds/valdbenchmarkoperatorrelease.yaml @@ -60,7 +60,6 @@ spec: - Healthy type: string spec: - type: object properties: affinity: type: object @@ -69,35 +68,32 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true env: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array image: - type: object properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - job: type: object + job: properties: client_config: - type: object properties: addrs: - type: array items: type: string + type: array backoff: - type: object properties: backoff_factor: type: number @@ -113,11 +109,11 @@ spec: type: string retry_count: type: integer + type: object call_option: type: object x-kubernetes-preserve-unknown-fields: true circuit_breaker: - type: object properties: closed_error_rate: type: number @@ -129,8 +125,8 @@ spec: type: integer open_timeout: type: string - connection_pool: type: object + connection_pool: properties: enable_dns_resolver: type: boolean @@ -142,8 +138,8 @@ spec: type: string size: type: integer - dial_option: type: object + dial_option: properties: backoff_base_delay: type: string @@ -162,13 +158,13 @@ spec: insecure: type: boolean interceptors: - type: array items: - type: string enum: - TraceInterceptor + - MetricInterceptor + type: string + type: array keepalive: - type: object properties: permit_without_stream: type: boolean @@ -176,15 +172,14 @@ spec: type: string timeout: type: string + type: object max_msg_size: type: integer min_connection_timeout: type: string net: - type: object properties: dialer: - type: object properties: dual_stack_enabled: type: boolean @@ -192,8 +187,8 @@ spec: type: string timeout: type: string - dns: type: object + dns: properties: cache_enabled: type: boolean @@ -201,8 +196,8 @@ spec: type: string refresh_duration: type: string - socket_option: type: object + socket_option: properties: ip_recover_destination_addr: type: boolean @@ -222,8 +217,8 @@ spec: type: boolean tcp_quick_ack: type: boolean - tls: type: object + tls: properties: ca: type: string @@ -235,12 +230,15 @@ spec: type: boolean key: type: string + type: object + type: object read_buffer_size: type: integer timeout: type: string write_buffer_size: type: integer + type: object health_check_duration: type: string max_recv_msg_size: @@ -250,7 +248,6 @@ spec: max_send_msg_size: type: integer tls: - type: object properties: ca: type: string @@ -262,60 +259,59 @@ spec: type: boolean key: type: string + type: object wait_for_ready: type: boolean - image: type: object + image: properties: pullPolicy: - type: string enum: - Always - Never - IfNotPresent + type: string repository: type: string tag: type: string - logging: + type: object type: object + logging: properties: format: - type: string enum: - raw - json - level: type: string + level: enum: - debug - info - warn - error - fatal - logger: type: string + logger: enum: - glg - zap + type: string + type: object name: type: string nodeSelector: type: object x-kubernetes-preserve-unknown-fields: true observability: - type: object properties: enabled: type: boolean otlp: - type: object properties: attribute: - type: object properties: metrics: - type: 
object properties: enable_cgo: type: boolean @@ -326,9 +322,10 @@ spec: enable_version_info: type: boolean version_info_labels: - type: array items: type: string + type: array + type: object namespace: type: string node_name: @@ -337,6 +334,7 @@ spec: type: string service_name: type: string + type: object collector_endpoint: type: string metrics_export_interval: @@ -351,13 +349,15 @@ spec: type: integer trace_max_queue_size: type: integer - trace: type: object + trace: properties: enabled: type: boolean sampling_rate: type: integer + type: object + type: object podAnnotations: type: object x-kubernetes-preserve-unknown-fields: true @@ -365,16 +365,15 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true rbac: - type: object properties: create: type: boolean name: type: string + type: object replicas: type: integer resources: - type: object properties: limits: type: object @@ -382,31 +381,27 @@ spec: requests: type: object x-kubernetes-preserve-unknown-fields: true + type: object securityContext: type: object x-kubernetes-preserve-unknown-fields: true server_config: - type: object properties: full_shutdown_duration: type: string healths: - type: object properties: liveness: - type: object properties: enabled: type: boolean host: type: string livenessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -414,6 +409,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -422,13 +418,12 @@ spec: type: integer timeoutSeconds: type: integer + type: object port: type: integer server: - type: object properties: http: - type: object properties: idle_timeout: type: string @@ -442,6 +437,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -450,10 +446,11 @@ spec: type: string socket_path: type: string + type: object servicePort: type: integer - readiness: type: object + readiness: properties: enabled: type: boolean @@ -462,12 +459,10 @@ spec: port: type: integer readinessProbe: - type: object properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -475,6 +470,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -483,11 +479,10 @@ spec: type: integer timeoutSeconds: type: integer - server: type: object + server: properties: http: - type: object properties: handler_timeout: type: string @@ -501,6 +496,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -509,20 +505,20 @@ spec: type: string socket_path: type: string + type: object servicePort: type: integer - startup: type: object + startup: properties: enabled: type: boolean - startupProbe: type: object + startupProbe: properties: failureThreshold: type: integer httpGet: - type: object properties: path: type: string @@ -530,6 +526,7 @@ spec: type: string scheme: type: string + type: object initialDelaySeconds: type: integer periodSeconds: @@ -538,11 +535,11 @@ spec: type: integer timeoutSeconds: type: integer - metrics: + type: object type: object + metrics: properties: pprof: - type: object properties: enabled: type: boolean @@ -551,10 +548,8 @@ spec: port: type: integer server: - type: object properties: http: - type: object properties: handler_timeout: type: string @@ -568,6 +563,7 @@ spec: type: string write_timeout: type: string + type: object mode: type: string network: @@ -576,11 +572,12 @@ spec: type: string 
socket_path: type: string - servers: + type: object + type: object type: object + servers: properties: grpc: - type: object properties: enabled: type: boolean @@ -591,10 +588,8 @@ spec: port: type: integer server: - type: object properties: grpc: - type: object properties: bidirectional_stream_concurrency: type: integer @@ -609,11 +604,15 @@ spec: initial_window_size: type: integer interceptors: - type: array items: + enum: + - RecoverInterceptor + - AccessLogInterceptor + - TraceInterceptor + - MetricInterceptor type: string + type: array keepalive: - type: object properties: max_conn_age: type: string @@ -629,6 +628,7 @@ spec: type: string timeout: type: string + type: object max_header_list_size: type: integer max_receive_message_size: @@ -639,6 +639,7 @@ spec: type: integer write_buffer_size: type: integer + type: object mode: type: string network: @@ -649,15 +650,17 @@ spec: type: boolean socket_path: type: string + type: object servicePort: type: integer - rest: type: object + rest: properties: enabled: type: boolean - tls: + type: object type: object + tls: properties: ca: type: string @@ -669,8 +672,9 @@ spec: type: boolean key: type: string - service: + type: object type: object + service: properties: annotations: type: object @@ -683,24 +687,26 @@ spec: type: object x-kubernetes-preserve-unknown-fields: true type: - type: string enum: - ClusterIP - LoadBalancer - NodePort - serviceAccount: + type: string type: object + serviceAccount: properties: create: type: boolean name: type: string + type: object time_zone: type: string tolerations: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array version: type: string + type: object diff --git a/k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml b/k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml index 5c0c6ccbe1..eab6b08774 100644 --- a/k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml +++ b/k8s/tools/benchmark/operator/crds/valdbenchmarkscenario.yaml @@ -61,34 +61,32 @@ spec: default: Available type: string spec: - type: object properties: dataset: - type: object properties: group: - type: string minLength: 1 + type: string indexes: - type: integer minimum: 0 + type: integer name: - type: string enum: - original - fashion-mnist + type: string range: - type: object properties: end: - type: integer minimum: 1 - start: type: integer + start: minimum: 1 + type: integer required: - start - end + type: object url: type: string required: @@ -96,21 +94,23 @@ spec: - indexes - group - range + type: object jobs: - type: array items: type: object x-kubernetes-preserve-unknown-fields: true + type: array target: - type: object properties: host: - type: string minLength: 1 + type: string port: - type: integer maximum: 65535 minimum: 0 + type: integer required: - host - port + type: object + type: object diff --git a/k8s/tools/benchmark/operator/deployment.yaml b/k8s/tools/benchmark/operator/deployment.yaml index b6b47adf29..f279dc595b 100644 --- a/k8s/tools/benchmark/operator/deployment.yaml +++ b/k8s/tools/benchmark/operator/deployment.yaml @@ -21,10 +21,10 @@ metadata: labels: app: vald-benchmark-operator app.kubernetes.io/name: vald-benchmark-operator - helm.sh/chart: vald-benchmark-operator-v1.7.13 + helm.sh/chart: vald-benchmark-operator-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: benchmark-operator spec: replicas: 1 @@ -42,7 +42,7 @@ 
spec: serviceAccountName: vald-benchmark-operator containers: - name: vald-benchmark-operator - image: "vdaas/vald-benchmark-operator:v1.7.13" + image: "vdaas/vald-benchmark-operator:v1.7.16" imagePullPolicy: Always livenessProbe: failureThreshold: 2 diff --git a/k8s/tools/benchmark/operator/service.yaml b/k8s/tools/benchmark/operator/service.yaml index 62635417f6..fdd47c5d8b 100644 --- a/k8s/tools/benchmark/operator/service.yaml +++ b/k8s/tools/benchmark/operator/service.yaml @@ -19,10 +19,10 @@ metadata: name: vald-benchmark-operator labels: app.kubernetes.io/name: vald-benchmark-operator - helm.sh/chart: vald-benchmark-operator-v1.7.13 + helm.sh/chart: vald-benchmark-operator-v1.7.16 app.kubernetes.io/managed-by: Helm app.kubernetes.io/instance: release-name - app.kubernetes.io/version: v1.7.13 + app.kubernetes.io/version: v1.7.16 app.kubernetes.io/component: helm-operator spec: ports: diff --git a/pkg/agent/core/faiss/handler/grpc/option.go b/pkg/agent/core/faiss/handler/grpc/option.go index 60b933ad91..5722502a81 100644 --- a/pkg/agent/core/faiss/handler/grpc/option.go +++ b/pkg/agent/core/faiss/handler/grpc/option.go @@ -18,12 +18,12 @@ package grpc import ( - "os" "runtime" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/core/faiss/service" ) diff --git a/pkg/agent/core/faiss/service/faiss.go b/pkg/agent/core/faiss/service/faiss.go index b7242a1859..dcea0649b2 100644 --- a/pkg/agent/core/faiss/service/faiss.go +++ b/pkg/agent/core/faiss/service/faiss.go @@ -42,6 +42,7 @@ import ( "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/internal/kvs" + "github.com/vdaas/vald/pkg/agent/internal/memstore" "github.com/vdaas/vald/pkg/agent/internal/metadata" "github.com/vdaas/vald/pkg/agent/internal/vqueue" ) @@ -49,23 +50,25 @@ import ( type ( Faiss interface { Start(ctx context.Context) <-chan error - Train(nb int, xb []float32) error - Insert(uuid string, xb []float32) error - InsertWithTime(uuid string, vec []float32, t int64) error - Update(uuid string, vec []float32) error - UpdateWithTime(uuid string, vec []float32, t int64) error - CreateIndex(ctx context.Context) error - SaveIndex(ctx context.Context) error - CreateAndSaveIndex(ctx context.Context) error Search(k, nprobe, nq uint32, xq []float32) (*payload.Search_Response, error) - Delete(uuid string) error - DeleteWithTime(uuid string, t int64) error + Insert(uuid string, vec []float32) (err error) + InsertWithTime(uuid string, vec []float32, t int64) (err error) + Update(uuid string, vec []float32) (err error) + UpdateWithTime(uuid string, vec []float32, t int64) (err error) + UpdateTimestamp(uuid string, ts int64, force bool) (err error) + Delete(uuid string) (err error) + DeleteWithTime(uuid string, t int64) (err error) Exists(uuid string) (uint32, bool) + CreateIndex(ctx context.Context) (err error) + SaveIndex(ctx context.Context) (err error) + CreateAndSaveIndex(ctx context.Context) (err error) + Train(nb int, vec []float32) (err error) IsIndexing() bool IsSaving() bool + Len() uint64 NumberOfCreateIndexExecution() uint64 NumberOfProactiveGCExecution() uint64 - Len() uint64 + UUIDs(context.Context) (uuids []string) InsertVQueueBufferLen() uint64 DeleteVQueueBufferLen() uint64 GetDimensionSize() int @@ -722,6 +725,10 @@ func (f *faiss) update(uuid string, vec []float32, t int64) 
(err error) { return f.insert(uuid, vec, t, false) } +func (f *faiss) UpdateTimestamp(uuid string, ts int64, force bool) (err error) { + return memstore.UpdateTimestamp(f.kvs, f.vq, uuid, ts, force, nil) +} + func (f *faiss) readyForUpdate(uuid string, vec []float32) (err error) { if len(uuid) == 0 { return errors.ErrUUIDNotFound(0) @@ -916,7 +923,7 @@ func (f *faiss) saveIndex(ctx context.Context) error { // no cleanup invalid index eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 @@ -932,10 +939,14 @@ func (f *faiss) saveIndex(ctx context.Context) error { f.smu.Lock() defer f.smu.Unlock() - eg.Go(safety.RecoverFunc(func() (err error) { - if f.kvs.Len() > 0 && path != "" { + if f.kvs.Len() > 0 && path != "" { + eg.Go(safety.RecoverFunc(func() (err error) { m := make(map[string]uint32, f.Len()) mt := make(map[string]int64, f.Len()) + defer func() { + m = nil + mt = nil + }() var mu sync.Mutex f.kvs.Range(ectx, func(key string, id uint32, ts int64) bool { @@ -1008,10 +1019,10 @@ func (f *faiss) saveIndex(ctx context.Context) error { } mt = make(map[string]int64) - } - return nil - })) + return nil + })) + } eg.Go(safety.RecoverFunc(func() (err error) { f.fmu.Lock() @@ -1032,7 +1043,7 @@ func (f *faiss) saveIndex(ctx context.Context) error { if fi != nil { derr := fi.Close() if derr != nil { - err = errors.Wrap(err, derr.Error()) + err = errors.Join(err, derr) } } }() @@ -1044,7 +1055,6 @@ func (f *faiss) saveIndex(ctx context.Context) error { if err != nil { return err } - err = fi.Sync() if err != nil { return err @@ -1167,7 +1177,8 @@ func (f *faiss) delete(uuid string, t int64, validation bool) error { if validation { _, _, ok := f.kvs.Get(uuid) - if !ok && !f.vq.IVExists(uuid) { + _, ivqok := f.vq.IVExists(uuid) + if !ok && !ivqok { return errors.ErrObjectIDNotFound(uuid) } } @@ -1175,27 +1186,12 @@ func (f *faiss) delete(uuid string, t int64, validation bool) error { return f.vq.PushDelete(uuid, t) } -func (f *faiss) Exists(uuid string) (uint32, bool) { - var ( - oid uint32 - ok bool - ) - - ok = f.vq.IVExists(uuid) - if !ok { - oid, _, ok = f.kvs.Get(uuid) - if !ok { - log.Debugf("Exists\tuuid: %s's data not found in kvsdb and insert vqueue\terror: %v", uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - if f.vq.DVExists(uuid) { - log.Debugf("Exists\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. 
the object will be delete soon\terror: %v", - uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - } +func (f *faiss) Exists(uuid string) (oid uint32, ok bool) { + return memstore.Exists(f.kvs, f.vq, uuid) +} - return oid, ok +func (f *faiss) GetObject(uuid string) (vec []float32, timestamp int64, err error) { + return memstore.GetObject(f.kvs, f.vq, uuid, nil) } func (f *faiss) IsIndexing() bool { @@ -1208,6 +1204,10 @@ func (f *faiss) IsSaving() bool { return s && ok } +func (f *faiss) UUIDs(ctx context.Context) (uuids []string) { + return memstore.UUIDs(ctx, f.kvs, f.vq) +} + func (f *faiss) NumberOfCreateIndexExecution() uint64 { return atomic.LoadUint64(&f.nocie) } @@ -1243,8 +1243,20 @@ func (f *faiss) GetTrainSize() int { return f.trainSize } -func (f *faiss) Close(ctx context.Context) error { - err := f.kvs.Close() +func (f *faiss) Close(ctx context.Context) (err error) { + defer f.core.Close() + defer func() { + kerr := f.kvs.Close() + if kerr != nil && + !errors.Is(err, context.Canceled) && + !errors.Is(err, context.DeadlineExceeded) { + if err != nil { + err = errors.Join(kerr, err) + } else { + err = kerr + } + } + }() if len(f.path) != 0 { cerr := f.CreateIndex(ctx) if cerr != nil && @@ -1252,28 +1264,33 @@ func (f *faiss) Close(ctx context.Context) error { !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { if err != nil { - err = errors.Wrap(cerr, err.Error()) + err = errors.Join(cerr, err) } else { err = cerr } } - serr := f.SaveIndex(ctx) if serr != nil && !errors.Is(err, errors.ErrUncommittedIndexNotFound) && !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { if err != nil { - err = errors.Wrap(serr, err.Error()) + err = errors.Join(serr, err) } else { err = serr } } } + return err +} - f.core.Close() - - return nil +// ListObjectFunc applies the input function on each index stored in the kvs and vqueue. +// Use this function to perform an operation on each object while keeping memory usage under control. +// If the vector still exists only in the vqueue, it has not been indexed yet, so its oid (object ID) is reported as 0.
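For illustration, a minimal usage sketch of this ListObjectFunc helper (not part of the patch itself), assuming a running *faiss instance f and a context ctx; it streams over every object held in kvsdb and the vqueue and counts the ones that are not yet indexed (oid == 0) without materializing any vectors; the caller can then report pending however it sees fit:

	var pending uint64
	f.ListObjectFunc(ctx, func(uuid string, oid uint32, ts int64) bool {
		if oid == 0 {
			pending++ // still waiting in the insert vqueue, not yet indexed
		}
		return true // returning false stops the iteration early
	})
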
+func (f *faiss) ListObjectFunc( + ctx context.Context, fn func(uuid string, oid uint32, ts int64) bool, +) { + memstore.ListObjectFunc(ctx, f.kvs, f.vq, fn) } func (f *faiss) toSearchResponse( diff --git a/pkg/agent/core/faiss/service/faiss_test.go b/pkg/agent/core/faiss/service/faiss_test.go index 47b0228427..abf0409def 100644 --- a/pkg/agent/core/faiss/service/faiss_test.go +++ b/pkg/agent/core/faiss/service/faiss_test.go @@ -3001,10 +3001,11 @@ package service // } // } // -// func Test_faiss_readyForUpdate(t *testing.T) { +// func Test_faiss_UpdateTimestamp(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// uuid string +// ts int64 +// force bool // } // type fields struct { // core core.Faiss @@ -3069,7 +3070,8 @@ package service // name: "test_case_1", // args: args { // uuid:"", -// vec:nil, +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -3127,7 +3129,8 @@ package service // name: "test_case_2", // args: args { // uuid:"", -// vec:nil, +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -3234,7 +3237,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.readyForUpdate(test.args.uuid, test.args.vec) +// err := f.UpdateTimestamp(test.args.uuid, test.args.ts, test.args.force) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3242,9 +3245,10 @@ package service // } // } // -// func Test_faiss_CreateIndex(t *testing.T) { +// func Test_faiss_readyForUpdate(t *testing.T) { // type args struct { -// ctx context.Context +// uuid string +// vec []float32 // } // type fields struct { // core core.Faiss @@ -3308,7 +3312,8 @@ package service // { // name: "test_case_1", // args: args { -// ctx:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -3365,7 +3370,8 @@ package service // return test { // name: "test_case_2", // args: args { -// ctx:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -3472,7 +3478,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.CreateIndex(test.args.ctx) +// err := f.readyForUpdate(test.args.uuid, test.args.vec) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3480,7 +3486,7 @@ package service // } // } // -// func Test_faiss_SaveIndex(t *testing.T) { +// func Test_faiss_CreateIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -3710,7 +3716,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.SaveIndex(test.args.ctx) +// err := f.CreateIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3718,7 +3724,7 @@ package service // } // } // -// func Test_faiss_saveIndex(t *testing.T) { +// func Test_faiss_SaveIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -3948,7 +3954,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.saveIndex(test.args.ctx) +// err := f.SaveIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -3956,7 +3962,7 @@ package service // } // } // -// func Test_faiss_moveAndSwitchSavedData(t *testing.T) { +// func Test_faiss_saveIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -4186,7 +4192,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := 
f.moveAndSwitchSavedData(test.args.ctx) +// err := f.saveIndex(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -4194,7 +4200,7 @@ package service // } // } // -// func Test_faiss_CreateAndSaveIndex(t *testing.T) { +// func Test_faiss_moveAndSwitchSavedData(t *testing.T) { // type args struct { // ctx context.Context // } @@ -4424,7 +4430,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.CreateAndSaveIndex(test.args.ctx) +// err := f.moveAndSwitchSavedData(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -4432,11 +4438,9 @@ package service // } // } // -// func Test_faiss_Search(t *testing.T) { +// func Test_faiss_CreateAndSaveIndex(t *testing.T) { // type args struct { -// k uint32 -// nq uint32 -// xq []float32 +// ctx context.Context // } // type fields struct { // core core.Faiss @@ -4477,25 +4481,21 @@ package service // kvsdbConcurrency int // } // type want struct { -// wantRes *payload.Search_Response -// err error +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotRes, w.wantRes) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) -// } // return nil // } // tests := []test{ @@ -4504,9 +4504,7 @@ package service // { // name: "test_case_1", // args: args { -// k:0, -// nq:0, -// xq:nil, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -4563,9 +4561,7 @@ package service // return test { // name: "test_case_2", // args: args { -// k:0, -// nq:0, -// xq:nil, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -4672,17 +4668,20 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// gotRes, err := f.Search(test.args.k, test.args.nq, test.args.xq) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// err := f.CreateAndSaveIndex(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_Delete(t *testing.T) { +// func Test_faiss_Search(t *testing.T) { // type args struct { -// uuid string +// k uint32 +// nprobe uint32 +// nq uint32 +// xq []float32 // } // type fields struct { // core core.Faiss @@ -4723,21 +4722,25 @@ package service // kvsdbConcurrency int // } // type want struct { -// err error +// wantRes *payload.Search_Response +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { +// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } // return nil // } // tests := []test{ @@ -4746,7 +4749,10 @@ package service // { // name: "test_case_1", // args: args { -// uuid:"", +// k:0, +// nprobe:0, +// nq:0, +// xq:nil, // }, // fields: fields { // core:nil, @@ -4803,7 +4809,10 @@ package service // return test { // name: "test_case_2", // args: args { -// uuid:"", +// k:0, +// nprobe:0, +// nq:0, +// xq:nil, // }, // fields: fields { // core:nil, @@ -4910,18 +4919,17 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.Delete(test.args.uuid) -// if err := checkFunc(test.want, err); err != nil { +// gotRes, err := f.Search(test.args.k, test.args.nprobe, test.args.nq, test.args.xq) +// if err := checkFunc(test.want, gotRes, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_DeleteWithTime(t *testing.T) { +// func Test_faiss_Delete(t *testing.T) { // type args struct { // uuid string -// t int64 // } // type fields struct { // core core.Faiss @@ -4986,7 +4994,6 @@ package service // name: "test_case_1", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -5044,7 +5051,6 @@ package service // name: "test_case_2", // args: args { // uuid:"", -// t:0, // }, // fields: fields { // core:nil, @@ -5151,7 +5157,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.DeleteWithTime(test.args.uuid, test.args.t) +// err := f.Delete(test.args.uuid) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5159,11 +5165,10 @@ package service // } // } // -// func Test_faiss_delete(t *testing.T) { +// func Test_faiss_DeleteWithTime(t *testing.T) { // type args struct { -// uuid string -// t int64 -// validation bool +// uuid string +// t int64 // } // type fields struct { // core core.Faiss @@ -5229,7 +5234,6 @@ package service // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -5288,7 +5292,6 @@ package service // args: args { // uuid:"", // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -5395,7 +5398,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// err := f.delete(test.args.uuid, test.args.t, test.args.validation) +// err := f.DeleteWithTime(test.args.uuid, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5403,9 +5406,11 @@ package service // } // } // -// func Test_faiss_Exists(t *testing.T) { +// func Test_faiss_delete(t *testing.T) { // type args struct { -// uuid string +// uuid string +// t int64 +// validation bool // } // type fields struct { // core core.Faiss @@ -5446,24 +5451,20 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint32 -// want1 bool +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, uint32, bool) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got uint32, got1 bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } -// if !reflect.DeepEqual(got1, w.want1) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got1, w.want1) +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// 
return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // return nil // } @@ -5474,6 +5475,8 @@ package service // name: "test_case_1", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -5531,6 +5534,8 @@ package service // name: "test_case_2", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -5637,15 +5642,18 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got, got1 := f.Exists(test.args.uuid) -// if err := checkFunc(test.want, got, got1); err != nil { +// err := f.delete(test.args.uuid, test.args.t, test.args.validation) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_IsIndexing(t *testing.T) { +// func Test_faiss_Exists(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -5685,19 +5693,24 @@ package service // kvsdbConcurrency int // } // type want struct { -// want bool +// wantOid uint32 +// wantOk bool // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, uint32, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { +// if !reflect.DeepEqual(gotOid, w.wantOid) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -5706,6 +5719,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5746,10 +5762,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -5760,6 +5776,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5800,10 +5819,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -5817,10 +5836,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -5865,15 +5884,18 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.IsIndexing() -// if err := checkFunc(test.want, got); err != nil { +// gotOid, gotOk := f.Exists(test.args.uuid) +// if err := 
checkFunc(test.want, gotOid, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_IsSaving(t *testing.T) { +// func Test_faiss_GetObject(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -5913,19 +5935,28 @@ package service // kvsdbConcurrency int // } // type want struct { -// want bool +// wantVec []float32 +// wantTimestamp int64 +// err error // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []float32, int64, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) // } // return nil // } @@ -5934,6 +5965,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -5974,10 +6008,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -5988,6 +6022,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -6028,10 +6065,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -6045,10 +6082,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -6093,15 +6130,15 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.IsSaving() -// if err := checkFunc(test.want, got); err != nil { +// gotVec, gotTimestamp, err := f.GetObject(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_NumberOfCreateIndexExecution(t *testing.T) { +// func Test_faiss_IsIndexing(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6141,17 +6178,17 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint64 +// want bool // } // type test struct { // name string // fields fields // 
want want -// checkFunc func(want, uint64) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got bool) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -6321,7 +6358,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.NumberOfCreateIndexExecution() +// got := f.IsIndexing() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -6329,7 +6366,7 @@ package service // } // } // -// func Test_faiss_NumberOfProactiveGCExecution(t *testing.T) { +// func Test_faiss_IsSaving(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6369,17 +6406,17 @@ package service // kvsdbConcurrency int // } // type want struct { -// want uint64 +// want bool // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got bool) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -6549,7 +6586,7 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.NumberOfProactiveGCExecution() +// got := f.IsSaving() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -6557,7 +6594,10 @@ package service // } // } // -// func Test_faiss_gc(t *testing.T) { +// func Test_faiss_UUIDs(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.Faiss // eg errgroup.Group @@ -6596,16 +6636,22 @@ package service // idelay time.Duration // kvsdbConcurrency int // } -// type want struct{} +// type want struct { +// wantUuids []string +// } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotUuids []string) error { +// if !reflect.DeepEqual(gotUuids, w.wantUuids) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) +// } // return nil // } // tests := []test{ @@ -6613,6 +6659,9 @@ package service // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -6653,10 +6702,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -6667,6 +6716,9 @@ package service // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -6707,10 +6759,10 @@ package service // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// 
afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -6724,10 +6776,10 @@ package service // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -6772,15 +6824,15 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// f.gc() -// if err := checkFunc(test.want); err != nil { +// gotUuids := f.UUIDs(test.args.ctx) +// if err := checkFunc(test.want, gotUuids); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_faiss_Len(t *testing.T) { +// func Test_faiss_NumberOfCreateIndexExecution(t *testing.T) { // type fields struct { // core core.Faiss // eg errgroup.Group @@ -7000,7 +7052,686 @@ package service // kvsdbConcurrency: test.fields.kvsdbConcurrency, // } // -// got := f.Len() +// got := f.NumberOfCreateIndexExecution() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_NumberOfProactiveGCExecution(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct { +// want uint64 +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// 
addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// got := f.NumberOfProactiveGCExecution() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_gc(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct{} +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// 
name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// f.gc() +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_faiss_Len(t *testing.T) { +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq 
vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct { +// want uint64 +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: 
test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// got := f.Len() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -8158,6 +8889,242 @@ package service // } // } // +// func Test_faiss_ListObjectFunc(t *testing.T) { +// type args struct { +// ctx context.Context +// fn func(uuid string, oid uint32, ts int64) bool +// } +// type fields struct { +// core core.Faiss +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// addVecs []float32 +// addIds []int64 +// isTrained bool +// trainSize int +// icnt uint64 +// indexing atomic.Value +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// inMem bool +// dim int +// nlist int +// m int +// alen int +// dur time.Duration +// sdur time.Duration +// lim time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// dcd bool +// idelay time.Duration +// kvsdbConcurrency int +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// fn:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// fn:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// addVecs:nil, +// addIds:nil, +// isTrained:false, +// trainSize:0, +// icnt:0, +// indexing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, 
+// inMem:false, +// dim:0, +// nlist:0, +// m:0, +// alen:0, +// dur:nil, +// sdur:nil, +// lim:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// dcd:false, +// idelay:nil, +// kvsdbConcurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// f := &faiss{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// addVecs: test.fields.addVecs, +// addIds: test.fields.addIds, +// isTrained: test.fields.isTrained, +// trainSize: test.fields.trainSize, +// icnt: test.fields.icnt, +// indexing: test.fields.indexing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// nlist: test.fields.nlist, +// m: test.fields.m, +// alen: test.fields.alen, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// lim: test.fields.lim, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// dcd: test.fields.dcd, +// idelay: test.fields.idelay, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// } +// +// f.ListObjectFunc(test.args.ctx, test.args.fn) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_faiss_toSearchResponse(t *testing.T) { // type args struct { // sr []algorithm.SearchResult diff --git a/pkg/agent/core/ngt/handler/grpc/index.go b/pkg/agent/core/ngt/handler/grpc/index.go index 446ebdab00..1c0dcbfea2 100644 --- a/pkg/agent/core/ngt/handler/grpc/index.go +++ b/pkg/agent/core/ngt/handler/grpc/index.go @@ -64,6 +64,9 @@ func (s *server) CreateIndex( }, }, info.Get())...) code = codes.FailedPrecondition + case errors.Is(err, errors.ErrFlushingIsInProgress): + err = status.WrapWithAborted("CreateIndex API aborted to process create indexes request due to flushing indices is in progress", err, details...) + code = codes.Aborted case errors.Is(err, context.Canceled): err = status.WrapWithCanceled(fmt.Sprintf("CreateIndex API canceled to create indexes pool_size = %d, error: %v", c.GetPoolSize(), err), err, details...) code = codes.Canceled @@ -149,6 +152,9 @@ func (s *server) CreateAndSaveIndex( }, }, info.Get())...) code = codes.FailedPrecondition + case errors.Is(err, errors.ErrFlushingIsInProgress): + err = status.WrapWithAborted("CreateAndSaveIndex API aborted to process create indexes request due to flushing indices is in progress", err, details...) 
+ code = codes.Aborted case errors.Is(err, context.Canceled): err = status.WrapWithCanceled(fmt.Sprintf("CreateAndSaveIndex API canceled to create indexes pool_size = %d, error: %v", c.GetPoolSize(), err), err, details...) code = codes.Canceled diff --git a/pkg/agent/core/ngt/handler/grpc/insert.go b/pkg/agent/core/ngt/handler/grpc/insert.go index bb3584d6c2..821ba32518 100644 --- a/pkg/agent/core/ngt/handler/grpc/insert.go +++ b/pkg/agent/core/ngt/handler/grpc/insert.go @@ -162,10 +162,10 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }() res, err := s.Insert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse Insert gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -181,10 +181,10 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamInsert gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err diff --git a/pkg/agent/core/ngt/handler/grpc/linear_search.go b/pkg/agent/core/ngt/handler/grpc/linear_search.go index 764cf3ba4d..fc0a74f9cb 100644 --- a/pkg/agent/core/ngt/handler/grpc/linear_search.go +++ b/pkg/agent/core/ngt/handler/grpc/linear_search.go @@ -23,7 +23,6 @@ import ( "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" - "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/attribute" @@ -344,10 +343,10 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }() res, err := s.LinearSearch(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse LinearSearch gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -363,11 +362,10 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse StreamLinearSearch gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } return err @@ -394,10 +392,10 @@ func (s *server) StreamLinearSearchByID( }() res, err := s.LinearSearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse LinearSearchByID gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -413,10 +411,10 @@ func (s *server) StreamLinearSearchByID( }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamLinearSearchByID gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -454,15 +452,10 @@ func (s *server) MultiLinearSearch( }() r, err := s.LinearSearch(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse LinearSearch gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -480,22 +473,13 @@ func (s *server) MultiLinearSearch( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse MultiLinearSearch gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.MultiLinearSearch", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return nil, err + return nil, errs } return res, nil } @@ -530,15 +514,10 @@ func (s *server) MultiLinearSearchByID( defer wg.Done() r, err := s.LinearSearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse LinearSearchByID gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -556,22 +535,13 @@ func (s *server) MultiLinearSearchByID( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse MultiLinearSearchByID gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.MultiLinearSearchByID", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return nil, err + return nil, errs } return res, nil } diff --git a/pkg/agent/core/ngt/handler/grpc/object.go b/pkg/agent/core/ngt/handler/grpc/object.go index 675b36e273..27865a5fcb 100644 --- a/pkg/agent/core/ngt/handler/grpc/object.go +++ b/pkg/agent/core/ngt/handler/grpc/object.go @@ -151,10 +151,10 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }() res, err := s.GetObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse GetObject gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamVector{ @@ -170,10 +170,10 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamGetObject gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } diff --git a/pkg/agent/core/ngt/handler/grpc/object_test.go b/pkg/agent/core/ngt/handler/grpc/object_test.go index 51fd595aa2..aba697e53b 100644 --- a/pkg/agent/core/ngt/handler/grpc/object_test.go +++ b/pkg/agent/core/ngt/handler/grpc/object_test.go @@ -1300,7 +1300,7 @@ func Test_server_StreamListObject(t *testing.T) { // Call the method under test err = s.StreamListObject(&payload.Object_List_Request{}, &stream) - // Check the errros are joined and its a gRPC error + // Check the errors are joined and its a gRPC error require.ErrorContains(t, err, "foo") require.ErrorContains(t, err, "bar") _, ok := status.FromError(err) diff --git a/pkg/agent/core/ngt/handler/grpc/option.go b/pkg/agent/core/ngt/handler/grpc/option.go index 6955b01400..cf9f523da5 100644 --- a/pkg/agent/core/ngt/handler/grpc/option.go +++ b/pkg/agent/core/ngt/handler/grpc/option.go @@ -18,12 +18,12 @@ package grpc import ( - "os" "runtime" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/core/ngt/service" ) diff --git a/pkg/agent/core/ngt/handler/grpc/remove.go b/pkg/agent/core/ngt/handler/grpc/remove.go index c47c3c9610..29c82d99af 100644 --- a/pkg/agent/core/ngt/handler/grpc/remove.go +++ b/pkg/agent/core/ngt/handler/grpc/remove.go @@ -23,7 +23,6 @@ import ( "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" - "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/attribute" @@ -157,10 +156,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }() res, err := s.Remove(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse Remove gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -176,10 +175,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamRemove gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -314,23 +313,14 @@ func (s *server) RemoveByTimestamp( return true }) if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse "+vald.RemoveByTimestampRPCName+" gRPC error response", - &errdetails.RequestInfo{ - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.Remove", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, - ) - log.Error(err) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) 
- span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + log.Error(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return nil, err + return nil, errs } if locs == nil || len(locs.GetLocations()) == 0 { err := status.WrapWithNotFound( diff --git a/pkg/agent/core/ngt/handler/grpc/search.go b/pkg/agent/core/ngt/handler/grpc/search.go index 2ef060fcc5..c75be15101 100644 --- a/pkg/agent/core/ngt/handler/grpc/search.go +++ b/pkg/agent/core/ngt/handler/grpc/search.go @@ -23,7 +23,6 @@ import ( "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" - "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/attribute" @@ -348,10 +347,10 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }() res, err := s.Search(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse Search gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -367,11 +366,10 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse StreamSearch gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -396,10 +394,10 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }() res, err := s.SearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse SearchByID gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -415,10 +413,10 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamSearchByID gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } return err @@ -456,15 +454,10 @@ func (s *server) MultiSearch( }() r, err := s.Search(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse Search gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -482,22 +475,13 @@ func (s *server) MultiSearch( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse MultiSearch gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.MultiSearch", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return nil, err + return nil, errs } return res, nil } @@ -532,15 +516,10 @@ func (s *server) MultiSearchByID( defer wg.Done() r, err := s.SearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse SearchByID gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -558,22 +537,13 @@ func (s *server) MultiSearchByID( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse MultiSearchByID gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.MultiSearchByID", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
+ span.SetStatus(trace.StatusError, errs.Error()) } - return nil, err + return nil, errs } return res, nil } diff --git a/pkg/agent/core/ngt/handler/grpc/update.go b/pkg/agent/core/ngt/handler/grpc/update.go index a8c8c8db42..2e630ad2d5 100644 --- a/pkg/agent/core/ngt/handler/grpc/update.go +++ b/pkg/agent/core/ngt/handler/grpc/update.go @@ -23,7 +23,6 @@ import ( "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" - "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/attribute" @@ -191,10 +190,10 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }() res, err := s.Update(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse Update gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -210,10 +209,10 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamUpdate gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -372,3 +371,100 @@ func (s *server) MultiUpdate( } return s.newLocations(uuids...), nil } + +func (s *server) UpdateTimestamp( + ctx context.Context, req *payload.Update_TimestampRequest, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + uuid := req.GetId() + reqInfo := &errdetails.RequestInfo{ + RequestId: uuid, + ServingData: errdetails.Serialize(req), + } + resInfo := &errdetails.ResourceInfo{ + ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateTimestampRPCName + "." + vald.GetObjectRPCName, + ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), + } + if len(uuid) == 0 { + err = errors.ErrInvalidUUID(uuid) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid uuid", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "invalid id", + Description: err.Error(), + }, + }, + }) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) 
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + ts := req.GetTimestamp() + if !req.GetForce() && ts < 0 { + err = errors.ErrInvalidTimestamp(ts) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid vector argument", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "timestamp", + Description: err.Error(), + }, + }, + }, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + err = s.ngt.UpdateTimestamp(uuid, ts, req.GetForce()) + if err != nil { + var attrs []attribute.KeyValue + if errors.Is(err, errors.ErrFlushingIsInProgress) { + err = status.WrapWithAborted(vald.UpdateTimestampRPCName+" API aborted to process update request due to flushing indices is in progress", err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeAborted(err.Error()) + } else if errors.Is(err, errors.ErrObjectNotFound(nil, uuid)) { + err = status.WrapWithNotFound(fmt.Sprintf(vald.UpdateTimestampRPCName+" API uuid %s's data not found", uuid), err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeNotFound(err.Error()) + } else if errors.Is(err, errors.ErrZeroTimestamp) || errors.Is(err, errors.ErrUUIDNotFound(0)) { + err = status.WrapWithInvalidArgument(fmt.Sprintf(vald.UpdateTimestampRPCName+" API invalid argument for uuid \"%s\" detected", uuid), err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "uuid, timestamp", + Description: err.Error(), + }, + }, + }) + log.Warn(err) + attrs = trace.StatusCodeInvalidArgument(err.Error()) + } else if errors.Is(err, errors.ErrNewerTimestampObjectAlreadyExists(uuid, ts)) { + err = status.WrapWithAlreadyExists(fmt.Sprintf(vald.UpdateTimestampRPCName+" API uuid %s's newer timestamp already exists", uuid), err, reqInfo, resInfo) + log.Warn(err) + attrs = trace.StatusCodeAlreadyExists(err.Error()) + } else { + err = status.WrapWithInternal(vald.UpdateTimestampRPCName+" API failed", err, reqInfo, resInfo, info.Get()) + log.Error(err) + attrs = trace.StatusCodeInternal(err.Error()) + } + if span != nil { + span.RecordError(err) + span.SetAttributes(attrs...) 
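// [editor's sketch, not part of the patch] A minimal illustration of driving the new
// UpdateTimestamp handler added above. The local timestampUpdater interface and the example
// values are hypothetical; the request field names mirror the getters the handler uses
// (GetId, GetTimestamp, GetForce), and failures come back as gRPC status errors with the codes
// mapped above (InvalidArgument, NotFound, AlreadyExists, Aborted while flushing, Internal).
type timestampUpdater interface {
	UpdateTimestamp(ctx context.Context, req *payload.Update_TimestampRequest) (*payload.Object_Location, error)
}

func exampleUpdateTimestamp(ctx context.Context, s timestampUpdater) (*payload.Object_Location, error) {
	req := &payload.Update_TimestampRequest{
		Id:        "uuid-1",             // target object ID
		Timestamp: 1700000000000000000,  // new timestamp; negative values are rejected unless Force is set
		Force:     false,                // set true to overwrite even if a newer timestamp already exists
	}
	return s.UpdateTimestamp(ctx, req)
}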
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + return s.newLocation(uuid), nil +} diff --git a/pkg/agent/core/ngt/handler/grpc/update_test.go b/pkg/agent/core/ngt/handler/grpc/update_test.go index 86bf5bb02f..c24a4857cc 100644 --- a/pkg/agent/core/ngt/handler/grpc/update_test.go +++ b/pkg/agent/core/ngt/handler/grpc/update_test.go @@ -1117,3 +1117,132 @@ func Test_server_Update(t *testing.T) { // }) // } // } +// +// func Test_server_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// req *payload.Update_TimestampRequest +// } +// type fields struct { +// name string +// ip string +// ngt service.NGT +// eg errgroup.Group +// streamConcurrency int +// UnimplementedAgentServer agent.UnimplementedAgentServer +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// name:"", +// ip:"", +// ngt:nil, +// eg:nil, +// streamConcurrency:0, +// UnimplementedAgentServer:nil, +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// name:"", +// ip:"", +// ngt:nil, +// eg:nil, +// streamConcurrency:0, +// UnimplementedAgentServer:nil, +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// name: test.fields.name, +// ip: test.fields.ip, +// ngt: test.fields.ngt, +// eg: test.fields.eg, +// streamConcurrency: test.fields.streamConcurrency, +// UnimplementedAgentServer: test.fields.UnimplementedAgentServer, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotRes, err := s.UpdateTimestamp(test.args.ctx, test.args.req) +// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/agent/core/ngt/handler/grpc/upsert.go 
b/pkg/agent/core/ngt/handler/grpc/upsert.go index 6640882785..25602bb491 100644 --- a/pkg/agent/core/ngt/handler/grpc/upsert.go +++ b/pkg/agent/core/ngt/handler/grpc/upsert.go @@ -20,7 +20,6 @@ import ( "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" - "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/codes" @@ -28,7 +27,6 @@ import ( "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" - "github.com/vdaas/vald/internal/strings" "github.com/vdaas/vald/internal/sync" ) @@ -152,10 +150,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }() res, err := s.Upsert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse Upsert gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -171,10 +169,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse StreamUpsert gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -318,19 +316,10 @@ func (s *server) MultiUpsert( } if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse MultiUpsert gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(ids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: ngtResourceType + "/ngt.MultiUpsert", - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get()) - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } return nil, err diff --git a/pkg/agent/core/ngt/service/ngt.go b/pkg/agent/core/ngt/service/ngt.go index 3970ac1cf9..66fc3fbf2f 100644 --- a/pkg/agent/core/ngt/service/ngt.go +++ b/pkg/agent/core/ngt/service/ngt.go @@ -48,125 +48,129 @@ import ( "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/agent/internal/kvs" + "github.com/vdaas/vald/pkg/agent/internal/memstore" "github.com/vdaas/vald/pkg/agent/internal/metadata" "github.com/vdaas/vald/pkg/agent/internal/vqueue" ) -type contextSaveIndexTimeKey string - -type NGT interface { - Start(ctx context.Context) <-chan error - Search(ctx context.Context, vec []float32, size uint32, epsilon, radius float32) (*payload.Search_Response, error) - SearchByID(ctx context.Context, uuid string, size uint32, epsilon, radius float32) ([]float32, *payload.Search_Response, error) - LinearSearch(ctx context.Context, vec []float32, size uint32) (*payload.Search_Response, error) - LinearSearchByID(ctx context.Context, uuid string, size uint32) ([]float32, *payload.Search_Response, error) - Insert(uuid string, vec []float32) (err error) - InsertWithTime(uuid string, vec []float32, t int64) (err error) - InsertMultiple(vecs map[string][]float32) (err error) - InsertMultipleWithTime(vecs map[string][]float32, t int64) (err error) - Update(uuid string, vec []float32) (err error) - UpdateWithTime(uuid string, vec []float32, t int64) (err error) - UpdateMultiple(vecs map[string][]float32) (err error) - UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err error) - Delete(uuid string) (err error) - DeleteWithTime(uuid string, t int64) (err error) - DeleteMultiple(uuids ...string) (err error) - DeleteMultipleWithTime(uuids []string, t int64) (err error) - RegenerateIndexes(ctx context.Context) (err error) - GetObject(uuid string) (vec []float32, timestamp int64, err error) - ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, timestamp int64) bool) - CreateIndex(ctx context.Context, poolSize uint32) (err error) - SaveIndex(ctx context.Context) (err error) - Exists(string) (uint32, bool) - CreateAndSaveIndex(ctx context.Context, poolSize uint32) (err error) - IsIndexing() bool - IsFlushing() bool - IsSaving() bool - Len() uint64 - NumberOfCreateIndexExecution() uint64 - NumberOfProactiveGCExecution() uint64 - UUIDs(context.Context) (uuids []string) - DeleteVQueueBufferLen() uint64 - InsertVQueueBufferLen() uint64 - GetDimensionSize() int - BrokenIndexCount() uint64 - IndexStatistics() (*payload.Info_Index_Statistics, error) - IsStatisticsEnabled() bool - IndexProperty() (*payload.Info_Index_Property, error) - Close(ctx context.Context) error -} - -type ngt struct { - // instances - core core.NGT - eg errgroup.Group - kvs kvs.BidiMap - fmu sync.Mutex - fmap map[string]int64 // failure map for index - vq vqueue.Queue - - // statuses - indexing atomic.Value - flushing atomic.Bool - saving atomic.Value - cimu sync.Mutex // create index mutex - lastNocie uint64 // last number of create index execution this value prevent unnecessary saveindex. 
- - // counters - nocie uint64 // number of create index execution - nogce uint64 // number of proactive GC execution - wfci uint64 // wait for create indexing - nobic uint64 // number of broken index count - nopvq atomic.Uint64 // number of processed vq number - - // parameters - cfg *config.NGT - opts []Option - - // configurations - inMem bool // in-memory mode - dim int // dimension size - alen int // auto indexing length - - lim time.Duration // auto indexing time limit - dur time.Duration // auto indexing check duration - sdur time.Duration // auto save index check duration - - minLit time.Duration // minimum load index timeout - maxLit time.Duration // maximum load index timeout - litFactor time.Duration // load index timeout factor - - enableProactiveGC bool // if this value is true, agent component will purge GC memory more proactive - enableCopyOnWrite bool // if this value is true, agent component will write backup file using Copy on Write and saves old files to the old directory - - podName string - podNamespace string - path string // index path - smu sync.Mutex // save index lock - tmpPath atomic.Value // temporary index path for Copy on Write - oldPath string // old volume path - basePath string // index base directory for CoW - brokenPath string // backup broken index path - cowmu sync.Mutex // copy on write move lock - - poolSize uint32 // default pool size - radius float32 // default radius - epsilon float32 // default epsilon - - idelay time.Duration // initial delay duration - dcd bool // disable commit daemon - - kvsdbConcurrency int // kvsdb concurrency - historyLimit int // the maximum generation number of broken index backup - - isReadReplica bool - enableExportIndexInfo bool - exportIndexInfoDuration time.Duration - patcher client.Patcher - - enableStatistics bool - statisticsCache atomic.Pointer[payload.Info_Index_Statistics] -} +type ( + NGT interface { + Start(ctx context.Context) <-chan error + Search(ctx context.Context, vec []float32, size uint32, epsilon, radius float32) (*payload.Search_Response, error) + SearchByID(ctx context.Context, uuid string, size uint32, epsilon, radius float32) ([]float32, *payload.Search_Response, error) + LinearSearch(ctx context.Context, vec []float32, size uint32) (*payload.Search_Response, error) + LinearSearchByID(ctx context.Context, uuid string, size uint32) ([]float32, *payload.Search_Response, error) + Insert(uuid string, vec []float32) (err error) + InsertWithTime(uuid string, vec []float32, t int64) (err error) + InsertMultiple(vecs map[string][]float32) (err error) + InsertMultipleWithTime(vecs map[string][]float32, t int64) (err error) + Update(uuid string, vec []float32) (err error) + UpdateWithTime(uuid string, vec []float32, t int64) (err error) + UpdateMultiple(vecs map[string][]float32) (err error) + UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err error) + UpdateTimestamp(uuid string, ts int64, force bool) (err error) + Delete(uuid string) (err error) + DeleteWithTime(uuid string, t int64) (err error) + DeleteMultiple(uuids ...string) (err error) + DeleteMultipleWithTime(uuids []string, t int64) (err error) + RegenerateIndexes(ctx context.Context) (err error) + GetObject(uuid string) (vec []float32, timestamp int64, err error) + ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, timestamp int64) bool) + Exists(uuid string) (uint32, bool) + CreateIndex(ctx context.Context, poolSize uint32) (err error) + SaveIndex(ctx context.Context) (err error) + CreateAndSaveIndex(ctx 
context.Context, poolSize uint32) (err error) + IsIndexing() bool + IsFlushing() bool + IsSaving() bool + Len() uint64 + NumberOfCreateIndexExecution() uint64 + NumberOfProactiveGCExecution() uint64 + UUIDs(context.Context) (uuids []string) + InsertVQueueBufferLen() uint64 + DeleteVQueueBufferLen() uint64 + GetDimensionSize() int + BrokenIndexCount() uint64 + IndexStatistics() (*payload.Info_Index_Statistics, error) + IsStatisticsEnabled() bool + IndexProperty() (*payload.Info_Index_Property, error) + Close(ctx context.Context) error + } + + ngt struct { + // instances + core core.NGT + eg errgroup.Group + kvs kvs.BidiMap + fmu sync.Mutex + fmap map[string]int64 // failure map for index + vq vqueue.Queue + + // statuses + indexing atomic.Value + flushing atomic.Bool + saving atomic.Value + cimu sync.Mutex // create index mutex + lastNocie uint64 // last number of create index execution this value prevent unnecessary saveindex. + + // counters + nocie uint64 // number of create index execution + nogce uint64 // number of proactive GC execution + wfci uint64 // wait for create indexing + nobic uint64 // number of broken index count + nopvq atomic.Uint64 // number of processed vq number + + // parameters + cfg *config.NGT + opts []Option + + // configurations + inMem bool // in-memory mode + dim int // dimension size + alen int // auto indexing length + + lim time.Duration // auto indexing time limit + dur time.Duration // auto indexing check duration + sdur time.Duration // auto save index check duration + + minLit time.Duration // minimum load index timeout + maxLit time.Duration // maximum load index timeout + litFactor time.Duration // load index timeout factor + + enableProactiveGC bool // if this value is true, agent component will purge GC memory more proactive + enableCopyOnWrite bool // if this value is true, agent component will write backup file using Copy on Write and saves old files to the old directory + + podName string + podNamespace string + path string // index path + smu sync.Mutex // save index lock + tmpPath atomic.Value // temporary index path for Copy on Write + oldPath string // old volume path + basePath string // index base directory for CoW + brokenPath string // backup broken index path + cowmu sync.Mutex // copy on write move lock + + poolSize uint32 // default pool size + radius float32 // default radius + epsilon float32 // default epsilon + + idelay time.Duration // initial delay duration + dcd bool // disable commit daemon + + kvsdbConcurrency int // kvsdb concurrency + historyLimit int // the maximum generation number of broken index backup + + isReadReplica bool + enableExportIndexInfo bool + exportIndexInfoDuration time.Duration + patcher client.Patcher + + enableStatistics bool + statisticsCache atomic.Pointer[payload.Info_Index_Statistics] + } + + contextSaveIndexTimeKey string +) const ( kvsFileName = "ngt-meta.kvsdb" @@ -260,6 +264,26 @@ func newNGT(cfg *config.NGT, opts ...Option) (n *ngt, err error) { return n, nil } +func (n *ngt) copyNGT(src *ngt) { + // instances + n.core = src.core + n.kvs = src.kvs + n.fmap = src.fmap + n.vq = src.vq + + // counters + n.wfci = src.wfci + n.nobic = src.nobic + n.nopvq = atomic.Uint64{} + + // paths + n.path = src.path + n.tmpPath = src.tmpPath + n.oldPath = src.oldPath + n.basePath = src.basePath + n.brokenPath = src.brokenPath +} + // migrate migrates the index directory from old to new under the input path if necessary. 
// Migration happens when the path is not empty and there is no `path/origin` directory, // which indicates that the user has NOT been using CoW mode and the index directory is not migrated yet. @@ -531,7 +555,7 @@ func (n *ngt) load(ctx context.Context, path string, opts ...core.Option) (err e // backupBroken backup index at originPath into brokenDir. // The name of the directory will be timestamp(UnixNano). -// If it exeeds the limit, backupBroken removes the oldest backup directory. +// If it exceeds the limit, backupBroken removes the oldest backup directory. func (n *ngt) backupBroken(ctx context.Context) error { if n.historyLimit <= 0 { return nil @@ -908,7 +932,7 @@ func (n *ngt) Start(ctx context.Context) <-chan error { } return ctx.Err() case <-tick.C: - if n.vq.IVQLen() >= n.alen { + if n.vq != nil && !n.IsFlushing() && n.vq.IVQLen() >= n.alen { err = n.CreateIndex(ctx, n.poolSize) } case <-limit.C: @@ -1098,7 +1122,7 @@ func (n *ngt) UpdateWithTime(uuid string, vec []float32, t int64) (err error) { } func (n *ngt) update(uuid string, vec []float32, t int64) (err error) { - if err = n.readyForUpdate(uuid, vec); err != nil { + if err = n.readyForUpdate(uuid, vec, t); err != nil { return err } err = n.delete(uuid, t, true) // `true` is to return NotFound error with non-existent ID @@ -1129,7 +1153,7 @@ func (n *ngt) UpdateMultipleWithTime(vecs map[string][]float32, t int64) (err er func (n *ngt) updateMultiple(vecs map[string][]float32, t int64) (err error) { uuids := make([]string, 0, len(vecs)) for uuid, vec := range vecs { - if err = n.readyForUpdate(uuid, vec); err != nil { + if err = n.readyForUpdate(uuid, vec, t); err != nil { delete(vecs, uuid) } else { uuids = append(uuids, uuid) @@ -1143,6 +1167,15 @@ func (n *ngt) updateMultiple(vecs map[string][]float32, t int64) (err error) { return n.insertMultiple(vecs, t, false) } +func (n *ngt) UpdateTimestamp(uuid string, ts int64, force bool) (err error) { + if n.IsFlushing() { + return errors.ErrFlushingIsInProgress + } + return memstore.UpdateTimestamp(n.kvs, n.vq, uuid, ts, force, func(oid uint32) ([]float32, error) { + return n.core.GetVector(uint(oid)) + }) +} + func (n *ngt) Delete(uuid string) (err error) { if n.IsFlushing() { return errors.ErrFlushingIsInProgress @@ -1167,7 +1200,10 @@ func (n *ngt) delete(uuid string, t int64, validation bool) (err error) { } if validation { _, _, ok := n.kvs.Get(uuid) - if !ok && !n.vq.IVExists(uuid) { + if !ok && func() (ok bool) { + _, ok = n.vq.IVExists(uuid) + return !ok + }() { return errors.ErrObjectIDNotFound(uuid) } } @@ -1242,14 +1278,12 @@ func (n *ngt) RegenerateIndexes(ctx context.Context) (err error) { if err != nil { log.Errorf("failed to flushing vector to ngt index in delete kvs. error: %v", err) } - n.kvs = nil - n.vq = nil // gc runtime.GC() atomic.AddUint64(&n.nogce, 1) - if n.inMem { + if !n.inMem { // delete file err = file.DeleteDir(ctx, n.path) if err != nil { @@ -1265,30 +1299,14 @@ func (n *ngt) RegenerateIndexes(ctx context.Context) (err error) { } } - nkvs := kvs.New(kvs.WithConcurrency(n.kvsdbConcurrency)) - - nvq, err := vqueue.New() - if err != nil { - log.Errorf("failed to create new vector vector queue. error: %v", err) - } - // renew instance nn, err := newNGT(n.cfg, n.opts...) 
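// [editor's sketch, not part of the patch] The service-level UpdateTimestamp above follows the
// same delegation pattern this patch introduces for Exists/GetObject/UUIDs/ListObjectFunc:
// kvs/vqueue bookkeeping moves into the shared pkg/agent/internal/memstore helpers, and the NGT
// core is only touched through a vector-lookup callback. A condensed sketch of that shape,
// assuming the memstore signature used above (the method name updateTimestampSketch is
// illustrative only):
func (n *ngt) updateTimestampSketch(uuid string, ts int64, force bool) error {
	if n.IsFlushing() {
		// mutating operations are rejected while a flush is in progress
		return errors.ErrFlushingIsInProgress
	}
	getVector := func(oid uint32) ([]float32, error) {
		// the core index is keyed by object ID, so memstore asks the caller to resolve oid -> vector
		return n.core.GetVector(uint(oid))
	}
	return memstore.UpdateTimestamp(n.kvs, n.vq, uuid, ts, force, getVector)
}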
if err != nil { return err } - nn.kvs = nkvs - nn.vq = nvq - - // Regenerate with flags set - nn.flushing.Store(true) - nn.indexing.Store(true) - defer nn.flushing.Store(false) - defer nn.indexing.Store(false) + n.copyNGT(nn) - n = nn - - return nil + return n.loadStatistics(ctx) } func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { @@ -1299,8 +1317,11 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { } }() - if n.isReadReplica { + switch { + case n.isReadReplica: return errors.ErrWriteOperationToReadReplica + case n.IsFlushing(): + return errors.ErrFlushingIsInProgress } ic := n.vq.IVQLen() + n.vq.DVQLen() @@ -1344,7 +1365,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { } log.Infof("create index operation started, uncommitted indexes = %d", ic) log.Debug("create index delete phase started") - // vqProcessedCnt is a tempral counter to store the number of processed vqueue items. + // vqProcessedCnt is a temporary counter to store the number of processed vqueue items. // This will be added to nopvq after CreateIndex operation succeeds. var vqProcessedCnt uint64 n.vq.RangePopDelete(ctx, now, func(uuid string) bool { @@ -1355,7 +1376,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { return true } log.Debugf("start remove operation for ngt index id: %s, oid: %d", uuid, oid) - if err := n.core.Remove(uint(oid)); err != nil { + if err = n.core.Remove(uint(oid)); err != nil { log.Errorf("failed to remove oid: %d from ngt index. error: %v", oid, err) n.fmu.Lock() n.fmap[uuid] = int64(oid) @@ -1372,7 +1393,8 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { var icnt uint32 n.vq.RangePopInsert(ctx, now, func(uuid string, vector []float32, timestamp int64) bool { log.Debugf("start insert operation for ngt index id: %s", uuid) - oid, err := n.core.Insert(vector) + var oid uint + oid, err = n.core.Insert(vector) if err != nil { log.Warnf("failed to insert vector uuid: %s vec: %v to ngt index. 
error: %v", uuid, vector, err) if errors.Is(err, errors.ErrIncompatibleDimensionSize(len(vector), n.dim)) { @@ -1428,9 +1450,24 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { return err } } + return n.loadStatistics(ctx) +} + +func (n *ngt) loadStatistics(ctx context.Context) (err error) { if n.IsStatisticsEnabled() { log.Info("loading index statistics to cache") - stats, err := n.core.GetGraphStatistics(core.AdditionalStatistics) + var stats *core.GraphStatistics + done := make(chan struct{}) + n.eg.Go(safety.RecoverFunc(func() error { + defer close(done) + stats, err = n.core.GetGraphStatistics(ctx, core.AdditionalStatistics) + return nil + })) + select { + case <-ctx.Done(): + return ctx.Err() + case <-done: + } if err != nil { log.Errorf("failed to load index statistics to cache: %v", err) return err @@ -1471,8 +1508,7 @@ func (n *ngt) CreateIndex(ctx context.Context, poolSize uint32) (err error) { IndegreeHistogram: stats.IndegreeHistogram, }) } - - return err + return nil } func (n *ngt) removeInvalidIndex(ctx context.Context) { @@ -1570,7 +1606,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { beforeNopvq := n.nopvq.Load() defer n.gc() - // since defering here, atomic operations are guaranteed in this scope + // since deferring here, atomic operations are guaranteed in this scope defer n.saving.Store(false) log.Debug("cleanup invalid index started") @@ -1578,7 +1614,7 @@ func (n *ngt) saveIndex(ctx context.Context) (err error) { log.Debug("cleanup invalid index finished") eg, ectx := errgroup.New(ctx) - // we want to ensure the acutal kvs size between kvsdb and metadata, + // we want to ensure the actual kvs size between kvsdb and metadata, // so we create this counter to count the actual kvs size instead of using kvs.Len() var ( kvsLen uint64 @@ -1830,59 +1866,23 @@ func (n *ngt) Exists(uuid string) (oid uint32, ok bool) { uuid, errors.ErrFlushingIsInProgress) return 0, false } - ok = n.vq.IVExists(uuid) - if !ok { - oid, _, ok = n.kvs.Get(uuid) - if !ok { - log.Debugf("Exists\tuuid: %s's data not found in kvsdb and insert vqueue\terror: %v", uuid, errors.ErrObjectIDNotFound(uuid)) - return 0, false - } - if n.vq.DVExists(uuid) { - log.Debugf( - "Exists\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. the object will be delete soon\terror: %v", - uuid, - errors.ErrObjectIDNotFound(uuid), - ) - return 0, false - } - } - return oid, ok + return memstore.Exists(n.kvs, n.vq, uuid) } func (n *ngt) GetObject(uuid string) (vec []float32, timestamp int64, err error) { - vec, ts, exists := n.vq.GetVector(uuid) - if exists { - return vec, ts, nil - } - - oid, ts, ok := n.kvs.Get(uuid) - if !ok { - log.Debugf("GetObject\tuuid: %s's data not found in kvsdb and insert vqueue", uuid) - return nil, 0, errors.ErrObjectIDNotFound(uuid) - } - - if n.vq.DVExists(uuid) { - log.Debugf("GetObject\tuuid: %s's data found in kvsdb and not found in insert vqueue, but delete vqueue data exists. 
the object will be delete soon", uuid) - return nil, 0, errors.ErrObjectIDNotFound(uuid) - } - - vec, err = n.core.GetVector(uint(oid)) - if err != nil { - log.Debugf("GetObject\tuuid: %s oid: %d's vector not found in ngt index", uuid, oid) - return nil, 0, errors.ErrObjectNotFound(err, uuid) - } - - return vec, ts, nil + return memstore.GetObject(n.kvs, n.vq, uuid, func(oid uint32) ([]float32, error) { + return n.core.GetVector(uint(oid)) + }) } -func (n *ngt) readyForUpdate(uuid string, vec []float32) (err error) { +func (n *ngt) readyForUpdate(uuid string, vec []float32, ts int64) (err error) { if len(uuid) == 0 { return errors.ErrUUIDNotFound(0) } if len(vec) != n.GetDimensionSize() { return errors.ErrInvalidDimensionSize(len(vec), n.GetDimensionSize()) } - ovec, _, err := n.GetObject(uuid) + ovec, ots, err := n.GetObject(uuid) // if error (GetObject cannot find vector) return error if err != nil { return err @@ -1891,6 +1891,14 @@ func (n *ngt) readyForUpdate(uuid string, vec []float32) (err error) { if len(vec) != len(ovec) || conv.F32stos(vec) != conv.F32stos(ovec) { return nil } + + if ots < ts { + err = n.UpdateTimestamp(uuid, ts, false) + if err != nil { + return err + } + } + // if no difference exists (same vector already exists) return error for skip update return errors.ErrUUIDAlreadyExists(uuid) } @@ -1910,15 +1918,7 @@ func (n *ngt) IsFlushing() bool { } func (n *ngt) UUIDs(ctx context.Context) (uuids []string) { - uuids = make([]string, 0, n.kvs.Len()) - var mu sync.Mutex - n.kvs.Range(ctx, func(uuid string, oid uint32, _ int64) bool { - mu.Lock() - uuids = append(uuids, uuid) - mu.Unlock() - return true - }) - return uuids + return memstore.UUIDs(ctx, n.kvs, n.vq) } func (n *ngt) NumberOfCreateIndexExecution() uint64 { @@ -1941,15 +1941,24 @@ func (n *ngt) gc() { } func (n *ngt) Len() uint64 { - return n.kvs.Len() + if n.kvs != nil && !n.IsFlushing() { + return n.kvs.Len() + } + return 0 } func (n *ngt) InsertVQueueBufferLen() uint64 { - return uint64(n.vq.IVQLen()) + if n.vq != nil && !n.IsFlushing() { + return uint64(n.vq.IVQLen()) + } + return 0 } func (n *ngt) DeleteVQueueBufferLen() uint64 { - return uint64(n.vq.DVQLen()) + if n.vq != nil && !n.IsFlushing() { + return uint64(n.vq.DVQLen()) + } + return 0 } func (n *ngt) GetDimensionSize() int { @@ -2009,25 +2018,7 @@ func (n *ngt) BrokenIndexCount() uint64 { // Use this function for performing something on each object with caring about the memory usage. // If the vector exists in the vqueue, this vector is not indexed so the oid(object ID) is processed as 0. 
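// [editor's note, not part of the patch] readyForUpdate now also receives the request timestamp,
// which changes the "same vector" fast path: when the stored vector is identical but its stored
// timestamp is older, only the timestamp is refreshed via UpdateTimestamp and the caller still
// gets ErrUUIDAlreadyExists, so the delete+insert cycle is skipped. Condensed decision flow,
// as implemented above:
//
//   vector differs from stored vector          -> return nil (proceed with delete + insert)
//   vector identical, stored ts < request ts   -> UpdateTimestamp(uuid, ts, false), then ErrUUIDAlreadyExists
//   vector identical, stored ts >= request ts  -> ErrUUIDAlreadyExists (nothing to do)
//
// The Len/InsertVQueueBufferLen/DeleteVQueueBufferLen guards below follow the same rule as
// UpdateTimestamp: while IsFlushing() is true the in-memory stores are reported as empty instead
// of being dereferenced.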
func (n *ngt) ListObjectFunc(ctx context.Context, f func(uuid string, oid uint32, ts int64) bool) { - dup := make(map[string]bool) - n.vq.Range(ctx, func(uuid string, vec []float32, ts int64) (ok bool) { - ok = f(uuid, 0, ts) - if !ok { - return false - } - var kts int64 - _, kts, ok = n.kvs.Get(uuid) - if ok && ts > kts { - dup[uuid] = true - } - return true - }) - n.kvs.Range(ctx, func(uuid string, oid uint32, ts int64) (ok bool) { - if dup[uuid] { - return true - } - return f(uuid, oid, ts) - }) + memstore.ListObjectFunc(ctx, n.kvs, n.vq, f) } func (n *ngt) IndexStatistics() (stats *payload.Info_Index_Statistics, err error) { diff --git a/pkg/agent/core/ngt/service/ngt_test.go b/pkg/agent/core/ngt/service/ngt_test.go index 3b3e355682..2c3591184d 100644 --- a/pkg/agent/core/ngt/service/ngt_test.go +++ b/pkg/agent/core/ngt/service/ngt_test.go @@ -650,7 +650,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns false when it's an initaial state", + name: "returns false when it's an initial state", args: args{ path: tmpDir, }, @@ -707,7 +707,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -738,7 +738,7 @@ func Test_needsBackup(t *testing.T) { tmpDir := t.TempDir() validIndexDir := testdata.GetTestdataPath(testdata.ValidIndex) return test{ - name: "returns true when mets.IsInvalid is true", + name: "returns true when meta.IsInvalid is true", args: args{ path: tmpDir, }, @@ -1370,7 +1370,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return } type args struct { - idxes []index + indices []index poolSize uint32 bulkSize int } @@ -1444,7 +1444,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { return test{ name: fmt.Sprintf("insert & upsert %d random and 11 digits added to each vector element", count), args: args{ - idxes: createRandomData(count, &createRandomDataConfig{ + indices: createRandomData(count, &createRandomDataConfig{ additionaldigits: 11, }), poolSize: uint32(count / 10), @@ -1490,7 +1490,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { if err != nil { tt.Errorf("failed to init ngt service, error = %v", err) } - for _, idx := range test.args.idxes { + for _, idx := range test.args.indices { err = n.Insert(idx.uuid, idx.vec) if err := checkFunc(test.want, err); err != nil { tt.Errorf("error = %v", err) @@ -1508,7 +1508,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { idx := i eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-1", idx) - for _, idx := range test.args.idxes[:len(test.args.idxes)/3] { + for _, idx := range test.args.indices[:len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1518,7 +1518,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-2", idx) - for _, idx := range test.args.idxes[len(test.args.idxes)/3 : 2*len(test.args.idxes)/3] { + for _, idx := range test.args.indices[len(test.args.indices)/3 : 2*len(test.args.indices)/3] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1528,7 +1528,7 @@ func Test_ngt_InsertUpsert(t *testing.T) { eg.Go(safety.RecoverFunc(func() error { log.Warnf("started %d-3", idx) - for _, idx := range test.args.idxes[2*len(test.args.idxes)/3:] { + for _, idx := range 
test.args.indices[2*len(test.args.indices)/3:] { _ = n.Delete(idx.uuid) _ = n.Insert(idx.uuid, idx.vec) } @@ -1569,9 +1569,9 @@ func Test_ngt_E2E(t *testing.T) { beforeFunc func(args) afterFunc func(args) } - multiUpsertRequestGenFunc := func(idxes []index, chunk int) (res []*payload.Upsert_MultiRequest) { + multiUpsertRequestGenFunc := func(indices []index, chunk int) (res []*payload.Upsert_MultiRequest) { reqs := make([]*payload.Upsert_Request, 0, chunk) - for i := 0; i < len(idxes); i++ { + for i := 0; i < len(indices); i++ { if len(reqs) == chunk-1 { res = append(res, &payload.Upsert_MultiRequest{ Requests: reqs, @@ -1580,8 +1580,8 @@ func Test_ngt_E2E(t *testing.T) { } else { reqs = append(reqs, &payload.Upsert_Request{ Vector: &payload.Object_Vector{ - Id: idxes[i].uuid, - Vector: idxes[i].vec, + Id: indices[i].uuid, + Vector: indices[i].vec, }, Config: &payload.Upsert_Config{ SkipStrictExistCheck: true, @@ -1784,10 +1784,993 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // +// func Test_ngt_copyNGT(t *testing.T) { +// type args struct { +// src *ngt +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// src:ngt{}, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t 
*testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// src:ngt{}, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } +// +// n.copyNGT(test.args.src) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// 
}) +// } +// } +// // func Test_migrate(t *testing.T) { // type args struct { -// ctx context.Context -// path string +// ctx context.Context +// path string +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// err := migrate(test.args.ctx, test.args.path) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ngt_prepareFolders(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// 
// TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: 
test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } +// +// err := n.prepareFolders(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ngt_load(t *testing.T) { +// type args struct { +// ctx context.Context +// path string +// opts []core.Option +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// path:"", +// opts:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, 
+// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// path:"", +// opts:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", +// path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: 
test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } +// +// err := n.load(test.args.ctx, test.args.path, test.args.opts...) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_ngt_backupBroken(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// core core.NGT +// eg errgroup.Group +// kvs kvs.BidiMap +// fmap map[string]int64 +// vq vqueue.Queue +// indexing atomic.Value +// flushing atomic.Bool +// saving atomic.Value +// lastNocie uint64 +// nocie uint64 +// nogce uint64 +// wfci uint64 +// nobic uint64 +// nopvq atomic.Uint64 +// cfg *config.NGT +// opts []Option +// inMem bool +// dim int +// alen int +// lim time.Duration +// dur time.Duration +// sdur time.Duration +// minLit time.Duration +// maxLit time.Duration +// litFactor time.Duration +// enableProactiveGC bool +// enableCopyOnWrite bool +// podName string +// podNamespace string +// path string +// tmpPath atomic.Value +// oldPath string +// basePath string +// brokenPath string +// poolSize uint32 +// radius float32 +// epsilon float32 +// idelay time.Duration +// dcd bool +// kvsdbConcurrency int +// historyLimit int +// isReadReplica bool +// enableExportIndexInfo bool +// exportIndexInfoDuration time.Duration +// patcher client.Patcher +// enableStatistics bool +// statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { // err error @@ -1795,6 +2778,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // type test struct { // name string // args args +// fields fields // want want // checkFunc func(want, error) error // beforeFunc func(*testing.T, args) @@ -1813,7 +2797,55 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", // path:"", +// tmpPath:nil, +// oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1833,7 +2865,55 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// }, +// fields: fields { +// core:nil, +// eg:nil, +// kvs:nil, +// fmap:nil, +// vq:nil, +// indexing:nil, +// flushing:nil, +// saving:nil, +// lastNocie:0, +// nocie:0, +// nogce:0, +// wfci:0, +// nobic:0, +// nopvq:nil, +// cfg:nil, +// opts:nil, +// inMem:false, +// dim:0, +// alen:0, +// lim:nil, +// dur:nil, +// sdur:nil, +// minLit:nil, +// maxLit:nil, +// litFactor:nil, +// enableProactiveGC:false, +// enableCopyOnWrite:false, +// podName:"", +// podNamespace:"", // path:"", +// tmpPath:nil, +// 
oldPath:"", +// basePath:"", +// brokenPath:"", +// poolSize:0, +// radius:0, +// epsilon:0, +// idelay:nil, +// dcd:false, +// kvsdbConcurrency:0, +// historyLimit:0, +// isReadReplica:false, +// enableExportIndexInfo:false, +// exportIndexInfoDuration:nil, +// patcher:nil, +// enableStatistics:false, +// statisticsCache:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1863,8 +2943,57 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // if test.checkFunc == nil { // checkFunc = defaultCheckFunc // } +// n := &ngt{ +// core: test.fields.core, +// eg: test.fields.eg, +// kvs: test.fields.kvs, +// fmap: test.fields.fmap, +// vq: test.fields.vq, +// indexing: test.fields.indexing, +// flushing: test.fields.flushing, +// saving: test.fields.saving, +// lastNocie: test.fields.lastNocie, +// nocie: test.fields.nocie, +// nogce: test.fields.nogce, +// wfci: test.fields.wfci, +// nobic: test.fields.nobic, +// nopvq: test.fields.nopvq, +// cfg: test.fields.cfg, +// opts: test.fields.opts, +// inMem: test.fields.inMem, +// dim: test.fields.dim, +// alen: test.fields.alen, +// lim: test.fields.lim, +// dur: test.fields.dur, +// sdur: test.fields.sdur, +// minLit: test.fields.minLit, +// maxLit: test.fields.maxLit, +// litFactor: test.fields.litFactor, +// enableProactiveGC: test.fields.enableProactiveGC, +// enableCopyOnWrite: test.fields.enableCopyOnWrite, +// podName: test.fields.podName, +// podNamespace: test.fields.podNamespace, +// path: test.fields.path, +// tmpPath: test.fields.tmpPath, +// oldPath: test.fields.oldPath, +// basePath: test.fields.basePath, +// brokenPath: test.fields.brokenPath, +// poolSize: test.fields.poolSize, +// radius: test.fields.radius, +// epsilon: test.fields.epsilon, +// idelay: test.fields.idelay, +// dcd: test.fields.dcd, +// kvsdbConcurrency: test.fields.kvsdbConcurrency, +// historyLimit: test.fields.historyLimit, +// isReadReplica: test.fields.isReadReplica, +// enableExportIndexInfo: test.fields.enableExportIndexInfo, +// exportIndexInfoDuration: test.fields.exportIndexInfoDuration, +// patcher: test.fields.patcher, +// enableStatistics: test.fields.enableStatistics, +// statisticsCache: test.fields.statisticsCache, +// } // -// err := migrate(test.args.ctx, test.args.path) +// err := n.backupBroken(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -1872,9 +3001,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_prepareFolders(t *testing.T) { +// func Test_ngt_rebuild(t *testing.T) { // type args struct { -// ctx context.Context +// ctx context.Context +// path string +// opts []core.Option // } // type fields struct { // core core.NGT @@ -1950,6 +3081,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// path:"", +// opts:nil, // }, // fields: fields { // core:nil, @@ -2018,6 +3151,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// path:"", +// opts:nil, // }, // fields: fields { // core:nil, @@ -2146,7 +3281,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.prepareFolders(test.args.ctx) +// err := n.rebuild(test.args.ctx, test.args.path, test.args.opts...) 
// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -2154,10 +3289,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_load(t *testing.T) { +// func Test_ngt_initNGT(t *testing.T) { // type args struct { -// ctx context.Context -// path string // opts []core.Option // } // type fields struct { @@ -2233,8 +3366,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// ctx:nil, -// path:"", // opts:nil, // }, // fields: fields { @@ -2303,8 +3434,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// ctx:nil, -// path:"", // opts:nil, // }, // fields: fields { @@ -2434,7 +3563,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.load(test.args.ctx, test.args.path, test.args.opts...) +// err := n.initNGT(test.args.opts...) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -2442,9 +3571,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_backupBroken(t *testing.T) { +// func Test_ngt_loadKVS(t *testing.T) { // type args struct { -// ctx context.Context +// ctx context.Context +// path string +// timeout time.Duration // } // type fields struct { // core core.NGT @@ -2520,6 +3651,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// path:"", +// timeout:nil, // }, // fields: fields { // core:nil, @@ -2588,6 +3721,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// path:"", +// timeout:nil, // }, // fields: fields { // core:nil, @@ -2716,7 +3851,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.backupBroken(test.args.ctx) +// err := n.loadKVS(test.args.ctx, test.args.path, test.args.timeout) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -2724,11 +3859,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_rebuild(t *testing.T) { +// func Test_ngt_Start(t *testing.T) { // type args struct { -// ctx context.Context -// path string -// opts []core.Option +// ctx context.Context // } // type fields struct { // core core.NGT @@ -2780,20 +3913,20 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// want <-chan error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, <-chan error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// defaultCheckFunc := func(w want, got <-chan error) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -2804,8 +3937,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) 
[]index { // name: "test_case_1", // args: args { // ctx:nil, -// path:"", -// opts:nil, // }, // fields: fields { // core:nil, @@ -2874,8 +4005,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, -// path:"", -// opts:nil, // }, // fields: fields { // core:nil, @@ -3004,17 +4133,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.rebuild(test.args.ctx, test.args.path, test.args.opts...) -// if err := checkFunc(test.want, err); err != nil { +// got := n.Start(test.args.ctx) +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_initNGT(t *testing.T) { +// func Test_ngt_Search(t *testing.T) { // type args struct { -// opts []core.Option +// ctx context.Context +// vec []float32 +// size uint32 +// epsilon float32 +// radius float32 // } // type fields struct { // core core.NGT @@ -3066,21 +4199,25 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// wantRes *payload.Search_Response +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { +// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } // return nil // } // tests := []test{ @@ -3089,7 +4226,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// opts:nil, +// ctx:nil, +// vec:nil, +// size:0, +// epsilon:0, +// radius:0, // }, // fields: fields { // core:nil, @@ -3157,7 +4298,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// opts:nil, +// ctx:nil, +// vec:nil, +// size:0, +// epsilon:0, +// radius:0, // }, // fields: fields { // core:nil, @@ -3286,19 +4431,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.initNGT(test.args.opts...) 
-// if err := checkFunc(test.want, err); err != nil { +// gotRes, err := n.Search(test.args.ctx, test.args.vec, test.args.size, test.args.epsilon, test.args.radius) +// if err := checkFunc(test.want, gotRes, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_loadKVS(t *testing.T) { +// func Test_ngt_SearchByID(t *testing.T) { // type args struct { // ctx context.Context -// path string -// timeout time.Duration +// uuid string +// size uint32 +// epsilon float32 +// radius float32 // } // type fields struct { // core core.NGT @@ -3350,21 +4497,29 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// wantVec []float32 +// wantDst *payload.Search_Response +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want, []float32, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotDst *payload.Search_Response, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotDst, w.wantDst) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDst, w.wantDst) +// } // return nil // } // tests := []test{ @@ -3374,8 +4529,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, -// path:"", -// timeout:nil, +// uuid:"", +// size:0, +// epsilon:0, +// radius:0, // }, // fields: fields { // core:nil, @@ -3444,8 +4601,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, -// path:"", -// timeout:nil, +// uuid:"", +// size:0, +// epsilon:0, +// radius:0, // }, // fields: fields { // core:nil, @@ -3574,17 +4733,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.loadKVS(test.args.ctx, test.args.path, test.args.timeout) -// if err := checkFunc(test.want, err); err != nil { +// gotVec, gotDst, err := n.SearchByID(test.args.ctx, test.args.uuid, test.args.size, test.args.epsilon, test.args.radius) +// if err := checkFunc(test.want, gotVec, gotDst, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Start(t *testing.T) { +// func Test_ngt_LinearSearch(t *testing.T) { // type args struct { -// ctx context.Context +// ctx context.Context +// vec []float32 +// size uint32 // } // type fields struct { // core core.NGT @@ -3636,20 +4797,24 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want <-chan error +// wantRes *payload.Search_Response +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, <-chan error) error +// checkFunc func(want, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// 
defaultCheckFunc := func(w want, got <-chan error) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) // } // return nil // } @@ -3660,6 +4825,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// vec:nil, +// size:0, // }, // fields: fields { // core:nil, @@ -3728,6 +4895,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// vec:nil, +// size:0, // }, // fields: fields { // core:nil, @@ -3856,21 +5025,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.Start(test.args.ctx) -// if err := checkFunc(test.want, got); err != nil { +// gotRes, err := n.LinearSearch(test.args.ctx, test.args.vec, test.args.size) +// if err := checkFunc(test.want, gotRes, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Search(t *testing.T) { +// func Test_ngt_LinearSearchByID(t *testing.T) { // type args struct { -// ctx context.Context -// vec []float32 -// size uint32 -// epsilon float32 -// radius float32 +// ctx context.Context +// uuid string +// size uint32 // } // type fields struct { // core core.NGT @@ -3922,7 +5089,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantRes *payload.Search_Response +// wantVec []float32 +// wantDst *payload.Search_Response // err error // } // type test struct { @@ -3930,16 +5098,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, []float32, *payload.Search_Response, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotDst *payload.Search_Response, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotRes, w.wantRes) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotDst, w.wantDst) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDst, w.wantDst) // } // return nil // } @@ -3950,10 +5121,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, -// vec:nil, +// uuid:"", // size:0, -// epsilon:0, -// radius:0, // }, // fields: fields { // core:nil, @@ -4022,10 +5191,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, -// vec:nil, +// uuid:"", // size:0, 
-// epsilon:0, -// radius:0, // }, // fields: fields { // core:nil, @@ -4154,21 +5321,18 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotRes, err := n.Search(test.args.ctx, test.args.vec, test.args.size, test.args.epsilon, test.args.radius) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// gotVec, gotDst, err := n.LinearSearchByID(test.args.ctx, test.args.uuid, test.args.size) +// if err := checkFunc(test.want, gotVec, gotDst, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_SearchByID(t *testing.T) { +// func Test_ngt_Insert(t *testing.T) { // type args struct { -// ctx context.Context -// uuid string -// size uint32 -// epsilon float32 -// radius float32 +// uuid string +// vec []float32 // } // type fields struct { // core core.NGT @@ -4220,29 +5384,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantVec []float32 -// wantDst *payload.Search_Response -// err error +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, []float32, *payload.Search_Response, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotDst *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) -// } -// if !reflect.DeepEqual(gotDst, w.wantDst) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDst, w.wantDst) -// } // return nil // } // tests := []test{ @@ -4251,11 +5407,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// ctx:nil, // uuid:"", -// size:0, -// epsilon:0, -// radius:0, +// vec:nil, // }, // fields: fields { // core:nil, @@ -4323,11 +5476,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// ctx:nil, // uuid:"", -// size:0, -// epsilon:0, -// radius:0, +// vec:nil, // }, // fields: fields { // core:nil, @@ -4456,19 +5606,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotVec, gotDst, err := n.SearchByID(test.args.ctx, test.args.uuid, test.args.size, test.args.epsilon, test.args.radius) -// if err := checkFunc(test.want, gotVec, gotDst, err); err != nil { +// err := n.Insert(test.args.uuid, test.args.vec) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_LinearSearch(t *testing.T) { +// func Test_ngt_InsertWithTime(t *testing.T) { // type args struct { -// ctx context.Context +// uuid string // vec []float32 -// size uint32 +// t int64 // } // type fields struct { // core core.NGT @@ -4520,25 +5670,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantRes *payload.Search_Response -// err error 
+// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotRes, w.wantRes) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) -// } // return nil // } // tests := []test{ @@ -4547,9 +5693,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// ctx:nil, +// uuid:"", // vec:nil, -// size:0, +// t:0, // }, // fields: fields { // core:nil, @@ -4617,9 +5763,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// ctx:nil, +// uuid:"", // vec:nil, -// size:0, +// t:0, // }, // fields: fields { // core:nil, @@ -4748,19 +5894,20 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotRes, err := n.LinearSearch(test.args.ctx, test.args.vec, test.args.size) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// err := n.InsertWithTime(test.args.uuid, test.args.vec, test.args.t) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_LinearSearchByID(t *testing.T) { +// func Test_ngt_insert(t *testing.T) { // type args struct { -// ctx context.Context -// uuid string -// size uint32 +// uuid string +// vec []float32 +// t int64 +// validation bool // } // type fields struct { // core core.NGT @@ -4812,29 +5959,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantVec []float32 -// wantDst *payload.Search_Response -// err error +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, []float32, *payload.Search_Response, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotDst *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) -// } -// if !reflect.DeepEqual(gotDst, w.wantDst) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDst, w.wantDst) -// } // return nil // } // tests := []test{ @@ -4843,9 +5982,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// ctx:nil, // uuid:"", -// size:0, +// vec:nil, +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -4913,9 +6053,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// ctx:nil, // uuid:"", -// size:0, +// 
vec:nil, +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -5044,18 +6185,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotVec, gotDst, err := n.LinearSearchByID(test.args.ctx, test.args.uuid, test.args.size) -// if err := checkFunc(test.want, gotVec, gotDst, err); err != nil { +// err := n.insert(test.args.uuid, test.args.vec, test.args.t, test.args.validation) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Insert(t *testing.T) { +// func Test_ngt_InsertMultiple(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// vecs map[string][]float32 // } // type fields struct { // core core.NGT @@ -5130,8 +6270,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // }, // fields: fields { // core:nil, @@ -5199,8 +6338,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // }, // fields: fields { // core:nil, @@ -5329,7 +6467,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.Insert(test.args.uuid, test.args.vec) +// err := n.InsertMultiple(test.args.vecs) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5337,10 +6475,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_InsertWithTime(t *testing.T) { +// func Test_ngt_InsertMultipleWithTime(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// vecs map[string][]float32 // t int64 // } // type fields struct { @@ -5416,8 +6553,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -5486,8 +6622,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -5617,7 +6752,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.InsertWithTime(test.args.uuid, test.args.vec, test.args.t) +// err := n.InsertMultipleWithTime(test.args.vecs, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5625,11 +6760,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_insert(t *testing.T) { +// func Test_ngt_insertMultiple(t *testing.T) { // type args struct { -// uuid string -// vec []float32 -// t int64 +// vecs map[string][]float32 +// now int64 // validation bool // } // type fields struct { @@ -5705,9 +6839,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, -// t:0, +// vecs:nil, +// now:0, // validation:false, // }, // fields: fields { @@ -5776,9 +6909,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, -// t:0, +// vecs:nil, +// now:0, // 
validation:false, // }, // fields: fields { @@ -5908,7 +7040,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.insert(test.args.uuid, test.args.vec, test.args.t, test.args.validation) +// err := n.insertMultiple(test.args.vecs, test.args.now, test.args.validation) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -5916,9 +7048,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_InsertMultiple(t *testing.T) { +// func Test_ngt_Update(t *testing.T) { // type args struct { -// vecs map[string][]float32 +// uuid string +// vec []float32 // } // type fields struct { // core core.NGT @@ -5993,7 +7126,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -6061,7 +7195,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, +// uuid:"", +// vec:nil, // }, // fields: fields { // core:nil, @@ -6190,7 +7325,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.InsertMultiple(test.args.vecs) +// err := n.Update(test.args.uuid, test.args.vec) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -6198,9 +7333,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_InsertMultipleWithTime(t *testing.T) { +// func Test_ngt_UpdateWithTime(t *testing.T) { // type args struct { -// vecs map[string][]float32 +// uuid string +// vec []float32 // t int64 // } // type fields struct { @@ -6276,7 +7412,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, +// uuid:"", +// vec:nil, // t:0, // }, // fields: fields { @@ -6345,7 +7482,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, +// uuid:"", +// vec:nil, // t:0, // }, // fields: fields { @@ -6475,7 +7613,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.InsertMultipleWithTime(test.args.vecs, test.args.t) +// err := n.UpdateWithTime(test.args.uuid, test.args.vec, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -6483,11 +7621,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_insertMultiple(t *testing.T) { +// func Test_ngt_update(t *testing.T) { // type args struct { -// vecs map[string][]float32 -// now int64 -// validation bool +// uuid string +// vec []float32 +// t int64 // } // type fields struct { // core core.NGT @@ -6562,9 +7700,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, -// now:0, -// validation:false, +// uuid:"", +// vec:nil, +// t:0, // }, // fields: fields { // core:nil, @@ -6632,9 +7770,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, -// now:0, -// validation:false, +// uuid:"", +// 
vec:nil, +// t:0, // }, // fields: fields { // core:nil, @@ -6763,7 +7901,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.insertMultiple(test.args.vecs, test.args.now, test.args.validation) +// err := n.update(test.args.uuid, test.args.vec, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -6771,10 +7909,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_Update(t *testing.T) { +// func Test_ngt_UpdateMultiple(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// vecs map[string][]float32 // } // type fields struct { // core core.NGT @@ -6849,8 +7986,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // }, // fields: fields { // core:nil, @@ -6918,8 +8054,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // }, // fields: fields { // core:nil, @@ -7048,7 +8183,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.Update(test.args.uuid, test.args.vec) +// err := n.UpdateMultiple(test.args.vecs) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -7056,10 +8191,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_UpdateWithTime(t *testing.T) { +// func Test_ngt_UpdateMultipleWithTime(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// vecs map[string][]float32 // t int64 // } // type fields struct { @@ -7135,8 +8269,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -7205,8 +8338,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -7336,7 +8468,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.UpdateWithTime(test.args.uuid, test.args.vec, test.args.t) +// err := n.UpdateMultipleWithTime(test.args.vecs, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -7344,10 +8476,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_update(t *testing.T) { +// func Test_ngt_updateMultiple(t *testing.T) { // type args struct { -// uuid string -// vec []float32 +// vecs map[string][]float32 // t int64 // } // type fields struct { @@ -7423,8 +8554,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -7493,8 +8623,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// vec:nil, +// vecs:nil, // t:0, // }, // fields: fields { @@ -7624,7 +8753,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // 
statisticsCache: test.fields.statisticsCache, // } // -// err := n.update(test.args.uuid, test.args.vec, test.args.t) +// err := n.updateMultiple(test.args.vecs, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -7632,9 +8761,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_UpdateMultiple(t *testing.T) { +// func Test_ngt_UpdateTimestamp(t *testing.T) { // type args struct { -// vecs map[string][]float32 +// uuid string +// ts int64 +// force bool // } // type fields struct { // core core.NGT @@ -7709,7 +8840,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, +// uuid:"", +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -7777,7 +8910,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, +// uuid:"", +// ts:0, +// force:false, // }, // fields: fields { // core:nil, @@ -7906,7 +9041,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.UpdateMultiple(test.args.vecs) +// err := n.UpdateTimestamp(test.args.uuid, test.args.ts, test.args.force) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -7914,10 +9049,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_UpdateMultipleWithTime(t *testing.T) { +// func Test_ngt_Delete(t *testing.T) { // type args struct { -// vecs map[string][]float32 -// t int64 +// uuid string // } // type fields struct { // core core.NGT @@ -7992,8 +9126,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, -// t:0, +// uuid:"", // }, // fields: fields { // core:nil, @@ -8061,8 +9194,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, -// t:0, +// uuid:"", // }, // fields: fields { // core:nil, @@ -8191,7 +9323,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.UpdateMultipleWithTime(test.args.vecs, test.args.t) +// err := n.Delete(test.args.uuid) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -8199,9 +9331,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_updateMultiple(t *testing.T) { +// func Test_ngt_DeleteWithTime(t *testing.T) { // type args struct { -// vecs map[string][]float32 +// uuid string // t int64 // } // type fields struct { @@ -8277,7 +9409,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// vecs:nil, +// uuid:"", // t:0, // }, // fields: fields { @@ -8346,7 +9478,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// vecs:nil, +// uuid:"", // t:0, // }, // fields: fields { @@ -8476,7 +9608,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.updateMultiple(test.args.vecs, test.args.t) +// err := n.DeleteWithTime(test.args.uuid, test.args.t) // if err := 
checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -8484,9 +9616,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_Delete(t *testing.T) { +// func Test_ngt_delete(t *testing.T) { // type args struct { -// uuid string +// uuid string +// t int64 +// validation bool // } // type fields struct { // core core.NGT @@ -8562,6 +9696,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -8630,6 +9766,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuid:"", +// t:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -8758,7 +9896,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.Delete(test.args.uuid) +// err := n.delete(test.args.uuid, test.args.t, test.args.validation) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -8766,10 +9904,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteWithTime(t *testing.T) { +// func Test_ngt_DeleteMultiple(t *testing.T) { // type args struct { -// uuid string -// t int64 +// uuids []string // } // type fields struct { // core core.NGT @@ -8844,8 +9981,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", -// t:0, +// uuids:nil, // }, // fields: fields { // core:nil, @@ -8913,8 +10049,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", -// t:0, +// uuids:nil, // }, // fields: fields { // core:nil, @@ -9043,7 +10178,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteWithTime(test.args.uuid, test.args.t) +// err := n.DeleteMultiple(test.args.uuids...) 
// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9051,11 +10186,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_delete(t *testing.T) { +// func Test_ngt_DeleteMultipleWithTime(t *testing.T) { // type args struct { -// uuid string -// t int64 -// validation bool +// uuids []string +// t int64 // } // type fields struct { // core core.NGT @@ -9130,9 +10264,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuid:"", +// uuids:nil, // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -9200,9 +10333,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuid:"", +// uuids:nil, // t:0, -// validation:false, // }, // fields: fields { // core:nil, @@ -9331,7 +10463,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.delete(test.args.uuid, test.args.t, test.args.validation) +// err := n.DeleteMultipleWithTime(test.args.uuids, test.args.t) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9339,9 +10471,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteMultiple(t *testing.T) { +// func Test_ngt_deleteMultiple(t *testing.T) { // type args struct { -// uuids []string +// uuids []string +// now int64 +// validation bool // } // type fields struct { // core core.NGT @@ -9417,6 +10551,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuids:nil, +// now:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -9485,6 +10621,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuids:nil, +// now:0, +// validation:false, // }, // fields: fields { // core:nil, @@ -9613,7 +10751,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteMultiple(test.args.uuids...) 
+// err := n.deleteMultiple(test.args.uuids, test.args.now, test.args.validation) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9621,10 +10759,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteMultipleWithTime(t *testing.T) { +// func Test_ngt_RegenerateIndexes(t *testing.T) { // type args struct { -// uuids []string -// t int64 +// ctx context.Context // } // type fields struct { // core core.NGT @@ -9699,8 +10836,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // { // name: "test_case_1", // args: args { -// uuids:nil, -// t:0, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -9768,8 +10904,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // return test { // name: "test_case_2", // args: args { -// uuids:nil, -// t:0, +// ctx:nil, // }, // fields: fields { // core:nil, @@ -9898,7 +11033,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.DeleteMultipleWithTime(test.args.uuids, test.args.t) +// err := n.RegenerateIndexes(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -9906,12 +11041,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_deleteMultiple(t *testing.T) { -// type args struct { -// uuids []string -// now int64 -// validation bool -// } +// func Test_ngt_loadStatistics(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -9966,12 +11096,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // type test struct { // name string -// args args // fields fields // want want // checkFunc func(want, error) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } // defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { @@ -9984,11 +11113,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// uuids:nil, -// now:0, -// validation:false, -// }, // fields: fields { // core:nil, // eg:nil, @@ -10040,10 +11164,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -10054,11 +11178,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// uuids:nil, -// now:0, -// validation:false, -// }, // fields: fields { // core:nil, // eg:nil, @@ -10110,10 +11229,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -10127,10 +11246,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// 
test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -10186,7 +11305,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.deleteMultiple(test.args.uuids, test.args.now, test.args.validation) +// err := n.loadStatistics() // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -10194,7 +11313,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_RegenerateIndexes(t *testing.T) { +// func Test_ngt_removeInvalidIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -10247,22 +11366,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct { -// err error -// } +// type want struct{} // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, error) error +// checkFunc func(want) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -10468,15 +11582,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.RegenerateIndexes(test.args.ctx) -// if err := checkFunc(test.want, err); err != nil { +// n.removeInvalidIndex(test.args.ctx) +// if err := checkFunc(test.want); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_removeInvalidIndex(t *testing.T) { +// func Test_ngt_saveIndex(t *testing.T) { // type args struct { // ctx context.Context // } @@ -10529,17 +11643,22 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// err error +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } // return nil // } // tests := []test{ @@ -10745,17 +11864,18 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.removeInvalidIndex(test.args.ctx) -// if err := checkFunc(test.want); err != nil { +// err := n.saveIndex(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_saveIndex(t *testing.T) { +// func Test_ngt_CreateAndSaveIndex(t *testing.T) { // type args struct { -// ctx context.Context +// ctx context.Context +// poolSize uint32 // } // type fields struct { // core core.NGT @@ -10831,6 +11951,7 @@ func createRandomData(num int, 
cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, +// poolSize:0, // }, // fields: fields { // core:nil, @@ -10899,6 +12020,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, +// poolSize:0, // }, // fields: fields { // core:nil, @@ -11027,7 +12149,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.saveIndex(test.args.ctx) +// err := n.CreateAndSaveIndex(test.args.ctx, test.args.poolSize) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11035,10 +12157,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_CreateAndSaveIndex(t *testing.T) { +// func Test_ngt_moveAndSwitchSavedData(t *testing.T) { // type args struct { -// ctx context.Context -// poolSize uint32 +// ctx context.Context // } // type fields struct { // core core.NGT @@ -11114,7 +12235,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // ctx:nil, -// poolSize:0, // }, // fields: fields { // core:nil, @@ -11183,7 +12303,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // ctx:nil, -// poolSize:0, // }, // fields: fields { // core:nil, @@ -11312,7 +12431,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.CreateAndSaveIndex(test.args.ctx, test.args.poolSize) +// err := n.moveAndSwitchSavedData(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11320,10 +12439,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_moveAndSwitchSavedData(t *testing.T) { -// type args struct { -// ctx context.Context -// } +// func Test_ngt_mktmp(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -11378,12 +12494,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // type test struct { // name string -// args args // fields fields // want want // checkFunc func(want, error) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } // defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { @@ -11396,9 +12511,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -11450,10 +12562,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -11464,9 +12576,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -11518,10 +12627,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: 
func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -11535,10 +12644,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -11594,7 +12703,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.moveAndSwitchSavedData(test.args.ctx) +// err := n.mktmp() // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -11602,7 +12711,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_mktmp(t *testing.T) { +// func Test_ngt_Exists(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -11653,19 +12765,24 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// wantOid uint32 +// wantOk bool // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, error) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, uint32, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { +// if !reflect.DeepEqual(gotOid, w.wantOid) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -11674,6 +12791,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -11725,10 +12845,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -11739,6 +12859,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // core:nil, // eg:nil, @@ -11790,10 +12913,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -11807,10 +12930,10 @@ func createRandomData(num int, cfg 
*createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -11866,15 +12989,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.mktmp() -// if err := checkFunc(test.want, err); err != nil { +// gotOid, gotOk := n.Exists(test.args.uuid) +// if err := checkFunc(test.want, gotOid, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Exists(t *testing.T) { +// func Test_ngt_GetObject(t *testing.T) { // type args struct { // uuid string // } @@ -11928,24 +13051,28 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantOid uint32 -// wantOk bool +// wantVec []float32 +// wantTimestamp int64 +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, uint32, bool) error +// checkFunc func(want, []float32, int64, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotOid uint32, gotOk bool) error { -// if !reflect.DeepEqual(gotOid, w.wantOid) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOid, w.wantOid) +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotOk, w.wantOk) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) // } // return nil // } @@ -12152,17 +13279,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotOid, gotOk := n.Exists(test.args.uuid) -// if err := checkFunc(test.want, gotOid, gotOk); err != nil { +// gotVec, gotTimestamp, err := n.GetObject(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_GetObject(t *testing.T) { +// func Test_ngt_readyForUpdate(t *testing.T) { // type args struct { // uuid string +// vec []float32 +// ts int64 // } // type fields struct { // core core.NGT @@ -12214,29 +13343,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantVec []float32 -// wantTimestamp int64 -// err error +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, []float32, int64, error) error +// checkFunc func(want, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, 
err error) error { +// defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) -// } -// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) -// } // return nil // } // tests := []test{ @@ -12246,6 +13367,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_1", // args: args { // uuid:"", +// vec:nil, +// ts:0, // }, // fields: fields { // core:nil, @@ -12314,6 +13437,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // name: "test_case_2", // args: args { // uuid:"", +// vec:nil, +// ts:0, // }, // fields: fields { // core:nil, @@ -12442,19 +13567,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotVec, gotTimestamp, err := n.GetObject(test.args.uuid) -// if err := checkFunc(test.want, gotVec, gotTimestamp, err); err != nil { +// err := n.readyForUpdate(test.args.uuid, test.args.vec, test.args.ts) +// if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_readyForUpdate(t *testing.T) { -// type args struct { -// uuid string -// vec []float32 -// } +// func Test_ngt_IsSaving(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -12505,20 +13626,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// err error +// want bool // } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want, error) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -12527,10 +13647,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// uuid:"", -// vec:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -12582,10 +13698,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -12596,10 +13712,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// uuid:"", -// vec:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -12651,10 +13763,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args 
args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -12668,10 +13780,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -12727,15 +13839,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.readyForUpdate(test.args.uuid, test.args.vec) -// if err := checkFunc(test.want, err); err != nil { +// got := n.IsSaving() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IsSaving(t *testing.T) { +// func Test_ngt_IsIndexing(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -12999,7 +14111,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsSaving() +// got := n.IsIndexing() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -13007,7 +14119,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_IsIndexing(t *testing.T) { +// func Test_ngt_IsFlushing(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -13271,7 +14383,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsIndexing() +// got := n.IsFlushing() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -13279,7 +14391,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_IsFlushing(t *testing.T) { +// func Test_ngt_UUIDs(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -13330,19 +14445,20 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want bool +// wantUuids []string // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, bool) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotUuids []string) error { +// if !reflect.DeepEqual(gotUuids, w.wantUuids) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) // } // return nil // } @@ -13351,6 +14467,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -13402,10 +14521,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // 
want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -13416,6 +14535,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -13467,10 +14589,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -13484,10 +14606,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -13543,18 +14665,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsFlushing() -// if err := checkFunc(test.want, got); err != nil { +// gotUuids := n.UUIDs(test.args.ctx) +// if err := checkFunc(test.want, gotUuids); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_UUIDs(t *testing.T) { -// type args struct { -// ctx context.Context -// } +// func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -13605,20 +14724,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantUuids []string +// want uint64 // } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want, []string) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, uint64) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, gotUuids []string) error { -// if !reflect.DeepEqual(gotUuids, w.wantUuids) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotUuids, w.wantUuids) +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -13627,9 +14745,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -13681,10 +14796,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -13695,9 +14810,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: 
"test_case_2", -// args: args { -// ctx:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -13749,10 +14861,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -13766,10 +14878,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -13825,15 +14937,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotUuids := n.UUIDs(test.args.ctx) -// if err := checkFunc(test.want, gotUuids); err != nil { +// got := n.NumberOfCreateIndexExecution() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_NumberOfCreateIndexExecution(t *testing.T) { +// func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14097,7 +15209,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.NumberOfCreateIndexExecution() +// got := n.NumberOfProactiveGCExecution() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -14105,7 +15217,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_NumberOfProactiveGCExecution(t *testing.T) { +// func Test_ngt_lastNumberOfCreateIndexExecution(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14369,7 +15481,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.NumberOfProactiveGCExecution() +// got := n.lastNumberOfCreateIndexExecution() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -14377,7 +15489,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_lastNumberOfCreateIndexExecution(t *testing.T) { +// func Test_ngt_gc(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14427,21 +15539,16 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct { -// want uint64 -// } +// type want struct{} // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -14641,15 +15748,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // 
statisticsCache: test.fields.statisticsCache, // } // -// got := n.lastNumberOfCreateIndexExecution() -// if err := checkFunc(test.want, got); err != nil { +// n.gc() +// if err := checkFunc(test.want); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_gc(t *testing.T) { +// func Test_ngt_Len(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -14699,16 +15806,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// want uint64 +// } // type test struct { // name string // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, uint64) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, got uint64) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } // return nil // } // tests := []test{ @@ -14908,15 +16020,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.gc() -// if err := checkFunc(test.want); err != nil { +// got := n.Len() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_Len(t *testing.T) { +// func Test_ngt_InsertVQueueBufferLen(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15180,7 +16292,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.Len() +// got := n.InsertVQueueBufferLen() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -15188,7 +16300,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_InsertVQueueBufferLen(t *testing.T) { +// func Test_ngt_DeleteVQueueBufferLen(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15452,7 +16564,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.InsertVQueueBufferLen() +// got := n.DeleteVQueueBufferLen() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -15460,7 +16572,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_DeleteVQueueBufferLen(t *testing.T) { +// func Test_ngt_GetDimensionSize(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15511,17 +16623,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want uint64 +// want int // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, uint64) error +// checkFunc func(want, int) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got uint64) error { +// defaultCheckFunc := func(w want, got int) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -15724,7 +16836,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) 
[]index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.DeleteVQueueBufferLen() +// got := n.GetDimensionSize() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -15732,7 +16844,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_GetDimensionSize(t *testing.T) { +// func Test_ngt_BrokenIndexCount(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -15783,17 +16895,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want int +// want uint64 // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, int) error +// checkFunc func(want, uint64) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got int) error { +// defaultCheckFunc := func(w want, got uint64) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -15996,7 +17108,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.GetDimensionSize() +// got := n.BrokenIndexCount() // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -16004,7 +17116,11 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // } // -// func Test_ngt_BrokenIndexCount(t *testing.T) { +// func Test_ngt_ListObjectFunc(t *testing.T) { +// type args struct { +// ctx context.Context +// f func(uuid string, oid uint32, ts int64) bool +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -16054,21 +17170,17 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct { -// want uint64 -// } +// type want struct{} // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, uint64) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got uint64) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -16076,6 +17188,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// f:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -16127,10 +17243,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -16141,6 +17257,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// f:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -16192,10 +17312,10 @@ func createRandomData(num int, cfg 
*createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -16209,10 +17329,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -16268,19 +17388,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.BrokenIndexCount() -// if err := checkFunc(test.want, got); err != nil { +// n.ListObjectFunc(test.args.ctx, test.args.f) +// if err := checkFunc(test.want); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_ListObjectFunc(t *testing.T) { -// type args struct { -// ctx context.Context -// f func(uuid string, oid uint32, ts int64) bool -// } +// func Test_ngt_IndexStatistics(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16330,17 +17446,25 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // enableStatistics bool // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } -// type want struct{} +// type want struct { +// wantStats *payload.Info_Index_Statistics +// err error +// } // type test struct { // name string -// args args // fields fields // want want -// checkFunc func(want) error -// beforeFunc func(*testing.T, args) -// afterFunc func(*testing.T, args) +// checkFunc func(want, *payload.Info_Index_Statistics, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotStats *payload.Info_Index_Statistics, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotStats, w.wantStats) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotStats, w.wantStats) +// } // return nil // } // tests := []test{ @@ -16348,10 +17472,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", -// args: args { -// ctx:nil, -// f:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -16403,10 +17523,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // }, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // }, @@ -16417,10 +17537,6 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", -// args: args { -// ctx:nil, -// f:nil, -// }, // fields: fields { // core:nil, // eg:nil, @@ -16472,10 +17588,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T, args args) { +// beforeFunc: func(t *testing.T,) { // t.Helper() // 
}, -// afterFunc: func(t *testing.T, args args) { +// afterFunc: func(t *testing.T,) { // t.Helper() // }, // } @@ -16489,10 +17605,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt, test.args) +// test.beforeFunc(tt) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt, test.args) +// defer test.afterFunc(tt) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -16548,15 +17664,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// n.ListObjectFunc(test.args.ctx, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotStats, err := n.IndexStatistics() +// if err := checkFunc(test.want, gotStats, err); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IndexStatistics(t *testing.T) { +// func Test_ngt_IsStatisticsEnabled(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16607,23 +17723,19 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// wantStats *payload.Info_Index_Statistics -// err error +// want bool // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, *payload.Info_Index_Statistics, error) error +// checkFunc func(want, bool) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, gotStats *payload.Info_Index_Statistics, err error) error { -// if !errors.Is(err, w.err) { -// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) -// } -// if !reflect.DeepEqual(gotStats, w.wantStats) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotStats, w.wantStats) +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } // return nil // } @@ -16824,15 +17936,15 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// gotStats, err := n.IndexStatistics() -// if err := checkFunc(test.want, gotStats, err); err != nil { +// got := n.IsStatisticsEnabled() +// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_ngt_IsStatisticsEnabled(t *testing.T) { +// func Test_ngt_IndexProperty(t *testing.T) { // type fields struct { // core core.NGT // eg errgroup.Group @@ -16883,17 +17995,21 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache atomic.Pointer[payload.Info_Index_Statistics] // } // type want struct { -// want bool +// want *payload.Info_Index_Property +// err error // } // type test struct { // name string // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, *payload.Info_Index_Property, error) error // beforeFunc func(*testing.T) // afterFunc func(*testing.T) // } -// defaultCheckFunc := func(w want, got bool) error { +// defaultCheckFunc := func(w want, got *payload.Info_Index_Property, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } // if !reflect.DeepEqual(got, w.want) { // 
return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -17096,8 +18212,8 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// got := n.IsStatisticsEnabled() -// if err := checkFunc(test.want, got); err != nil { +// got, err := n.IndexProperty() +// if err := checkFunc(test.want, got, err); err != nil { // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/agent/internal/kvs/kvs.go b/pkg/agent/internal/kvs/kvs.go index 0b5cec14ef..713220b9d3 100644 --- a/pkg/agent/internal/kvs/kvs.go +++ b/pkg/agent/internal/kvs/kvs.go @@ -51,10 +51,10 @@ type ValueStructUo struct { } type bidi struct { - concurrency int - l uint64 ou [slen]*sync.Map[uint32, valueStructOu] uo [slen]*sync.Map[string, ValueStructUo] + concurrency int + l uint64 } const ( @@ -64,6 +64,8 @@ const ( // mask is slen-1 Hex value. mask = 0x1FF // mask = 0xFFF. + + maxHashKeyLength = slen / 2 ) // New returns the bidi that satisfies the BidiMap interface. @@ -186,8 +188,8 @@ func (b *bidi) Close() error { } func getShardID(key string) (id uint64) { - if len(key) > 128 { - return xxh3.HashString(key[:128]) & mask + if len(key) > maxHashKeyLength { + return xxh3.HashString(key[:maxHashKeyLength]) & mask } return xxh3.HashString(key) & mask } diff --git a/pkg/agent/internal/kvs/kvs_test.go b/pkg/agent/internal/kvs/kvs_test.go index e42213ff36..1d8f0cc8f7 100644 --- a/pkg/agent/internal/kvs/kvs_test.go +++ b/pkg/agent/internal/kvs/kvs_test.go @@ -229,7 +229,7 @@ func Test_bidi_Get(t *testing.T) { ) return test{ - name: "return the value when there is a value for the key and l of fields is maximun value of uint64", + name: "return the value when there is a value for the key and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -496,7 +496,7 @@ func Test_bidi_GetInverse(t *testing.T) { ) return test{ - name: "return key and timestamp and true when there is a key for the value and l of fields is maximun value of uint64", + name: "return key and timestamp and true when there is a key for the value and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -770,7 +770,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is not empty string and val is not 0 and l of fields is maximun value of uint64", + name: "set success when the key is not empty string and val is not 0 and l of fields is maximum value of uint64", args: args{ key: key, val: val, @@ -806,7 +806,7 @@ func Test_bidi_Set(t *testing.T) { ) return test{ - name: "set success when the key is already set and the same key is set twie", + name: "set success when the key is already set and the same key is set twice", args: args{ key: key, val: val, @@ -1071,7 +1071,7 @@ func Test_bidi_Delete(t *testing.T) { ) return test{ - name: "return val and true when the delete successes and l of fields is maximun value of uint64", + name: "return val and true when the delete successes and l of fields is maximum value of uint64", args: args{ key: key, }, @@ -1353,7 +1353,7 @@ func Test_bidi_DeleteInverse(t *testing.T) { ) return test{ - name: "return key and true when the delete successes and l of fields is maximun value of uint64", + name: "return key and true when the delete successes and l of fields is maximum value of uint64", args: args{ val: val, }, @@ -1644,7 +1644,7 @@ func Test_bidi_Range(t *testing.T) { var mu sync.Mutex return test{ - name: "rage get successes when l of fields is maximun value of uint64", + 
name: "rage get successes when l of fields is maximum value of uint64", args: args{ f: func(s string, u uint32, t int64) bool { mu.Lock() @@ -1743,7 +1743,7 @@ func Test_bidi_Len(t *testing.T) { }, }, { - name: "return maximun value when l of field is maximun value of uint64", + name: "return maximum value when l of field is maximum value of uint64", fields: fields{ l: math.MaxUint64, }, @@ -1793,10 +1793,10 @@ func Test_bidi_Len(t *testing.T) { // // func Test_bidi_Close(t *testing.T) { // type fields struct { -// concurrency int -// l uint64 // ou [slen]*sync.Map[uint32, valueStructOu] // uo [slen]*sync.Map[string, ValueStructUo] +// concurrency int +// l uint64 // } // type want struct { // err error @@ -1821,10 +1821,10 @@ func Test_bidi_Len(t *testing.T) { // { // name: "test_case_1", // fields: fields { -// concurrency:0, -// l:0, // ou:nil, // uo:nil, +// concurrency:0, +// l:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1843,10 +1843,10 @@ func Test_bidi_Len(t *testing.T) { // return test { // name: "test_case_2", // fields: fields { -// concurrency:0, -// l:0, // ou:nil, // uo:nil, +// concurrency:0, +// l:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1877,10 +1877,10 @@ func Test_bidi_Len(t *testing.T) { // checkFunc = defaultCheckFunc // } // b := &bidi{ -// concurrency: test.fields.concurrency, -// l: test.fields.l, // ou: test.fields.ou, // uo: test.fields.uo, +// concurrency: test.fields.concurrency, +// l: test.fields.l, // } // // err := b.Close() diff --git a/pkg/agent/internal/vqueue/queue.go b/pkg/agent/internal/vqueue/queue.go index 870a96a9f6..ed105b191b 100644 --- a/pkg/agent/internal/vqueue/queue.go +++ b/pkg/agent/internal/vqueue/queue.go @@ -32,14 +32,17 @@ import ( // Queue represents vector queue cache interface. 
type Queue interface { - PushInsert(uuid string, vector []float32, date int64) error - PushDelete(uuid string, date int64) error + PushInsert(uuid string, vector []float32, timestamp int64) error + PushDelete(uuid string, timestamp int64) error + PopInsert(uuid string) (vector []float32, timestamp int64, ok bool) + PopDelete(uuid string) (timestamp int64, ok bool) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) Range(ctx context.Context, f func(uuid string, vector []float32, ts int64) bool) - RangePopInsert(ctx context.Context, now int64, f func(uuid string, vector []float32, date int64) bool) + GetVectorWithTimestamp(uuid string) (vec []float32, its, dts int64, exists bool) + RangePopInsert(ctx context.Context, now int64, f func(uuid string, vector []float32, timestamp int64) bool) RangePopDelete(ctx context.Context, now int64, f func(uuid string) bool) - IVExists(uuid string) bool - DVExists(uuid string) bool + IVExists(uuid string) (timestamp int64, ok bool) + DVExists(uuid string) (timestamp int64, ok bool) IVQLen() int DVQLen() int } @@ -50,9 +53,9 @@ type vqueue struct { } type index struct { - date int64 - vector []float32 - uuid string + uuid string + vector []float32 + timestamp int64 } func New(opts ...Option) (Queue, error) { @@ -72,22 +75,25 @@ func New(opts ...Option) (Queue, error) { return vq, nil } -func (v *vqueue) PushInsert(uuid string, vector []float32, date int64) error { - if date == 0 { - date = time.Now().UnixNano() +func (v *vqueue) PushInsert(uuid string, vector []float32, timestamp int64) error { + if len(uuid) == 0 || vector == nil { + return nil + } + if timestamp == 0 { + timestamp = time.Now().UnixNano() } - didx, ok := v.dl.Load(uuid) - if ok && didx.date > date { + dts, ok := v.loadDVQ(uuid) + if ok && newer(dts, timestamp) { return nil } idx := index{ - uuid: uuid, - vector: vector, - date: date, + uuid: uuid, + vector: vector, + timestamp: timestamp, } oidx, loaded := v.il.LoadOrStore(uuid, &idx) if loaded { - if date > oidx.date { // if data already exists and existing index is older than new one + if newer(timestamp, oidx.timestamp) { // if data already exists and existing index is older than new one v.il.Store(uuid, &idx) } } else { @@ -96,17 +102,20 @@ func (v *vqueue) PushInsert(uuid string, vector []float32, date int64) error { return nil } -func (v *vqueue) PushDelete(uuid string, date int64) error { - if date == 0 { - date = time.Now().UnixNano() +func (v *vqueue) PushDelete(uuid string, timestamp int64) error { + if len(uuid) == 0 { + return nil + } + if timestamp == 0 { + timestamp = time.Now().UnixNano() } idx := index{ - uuid: uuid, - date: date, + uuid: uuid, + timestamp: timestamp, } oidx, loaded := v.dl.LoadOrStore(uuid, &idx) if loaded { - if date > oidx.date { // if data already exists and existing index is older than new one + if newer(timestamp, oidx.timestamp) { // if data already exists and existing index is older than new one v.dl.Store(uuid, &idx) } } else { @@ -115,81 +124,103 @@ func (v *vqueue) PushDelete(uuid string, date int64) error { return nil } +func (v *vqueue) PopInsert(uuid string) (vector []float32, timestamp int64, ok bool) { + var idx *index + idx, ok = v.il.LoadAndDelete(uuid) + if !ok || idx == nil || idx.timestamp == 0 { + return nil, 0, false + } + _ = atomic.AddUint64(&v.ic, ^uint64(0)) + return idx.vector, idx.timestamp, ok +} + +func (v *vqueue) PopDelete(uuid string) (timestamp int64, ok bool) { + var idx *index + idx, ok = v.dl.LoadAndDelete(uuid) + if !ok || idx == nil || idx.timestamp 
== 0 { + return 0, false + } + _ = atomic.AddUint64(&v.dc, ^uint64(0)) + return idx.timestamp, ok +} + // GetVector returns the vector stored in the queue. +func (v *vqueue) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) { + vec, timestamp, _, exists = v.getVector(uuid, false) + return vec, timestamp, exists +} + +// GetVectorWithTimestamp returns the vector and timestamps stored in the queue. +func (v *vqueue) GetVectorWithTimestamp(uuid string) (vec []float32, its, dts int64, exists bool) { + return v.getVector(uuid, true) +} + +// getVector returns the vector and timestamps stored in the queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the vector is returned if the timestamp in the insert queue is newer than the delete queue. -func (v *vqueue) GetVector(uuid string) (vec []float32, timestamp int64, exists bool) { - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return nil, 0, false +func (v *vqueue) getVector( + uuid string, enableDeleteTimestamp bool, +) (vec []float32, its, dts int64, ok bool) { + vec, its, ok = v.loadIVQ(uuid) + if !ok || vec == nil { + if !enableDeleteTimestamp { + // data not in the insert queue then return not exists(false) + return nil, 0, 0, false + } + dts, ok = v.loadDVQ(uuid) + if !ok || dts == 0 { + // data not in the delete queue and insert queue then return not exists(false) + return nil, 0, 0, false + } + // data not in the insert queue and exists in delete queue then return not exists(false) with delete index timestamp + return nil, 0, dts, false } - didx, ok := v.dl.Load(uuid) - if !ok { + dts, ok = v.loadDVQ(uuid) + if !ok || dts == 0 { // data not in the delete queue but exists in insert queue then return exists(true) - return idx.vector, idx.date, true + return vec, its, 0, vec != nil // usually vec is non-nil which means true } - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - if didx.date <= idx.date { - return idx.vector, idx.date, true - } - return nil, 0, false + // data exists in both queues; compare timestamps, and the last return value will be true only if the insert queue timestamp is newer than the delete one + // However, if insert and delete are sent by the update instruction, the timestamp will be the same + return vec, its, dts, vec != nil && newer(its, dts) // usually vec is non-nil } -// IVExists returns true if there is the UUID in the insert queue. +// IVExists returns timestamp of iv and true if there is the UUID in the insert queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the true is returned if the timestamp in the insert queue is newer than the delete queue.
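A short sketch of the timestamp rule that the rewritten getVector and the new newer helper encode: an entry is considered live only when its insert-queue timestamp is strictly newer than any delete-queue timestamp for the same uuid, so an insert and a delete carrying the same timestamp (for example, both emitted by one update request) resolve to "not live". The map-based queues below are simplified stand-ins, not the actual vqueue types.

```go
package main

import "fmt"

// newer mirrors the helper added in queue.go: strictly greater wins.
func newer(ts1, ts2 int64) bool { return ts1 > ts2 }

// live decides visibility the same way getVector does, using plain maps
// keyed by uuid as stand-ins for the insert/delete vector queues.
func live(uuid string, ivq, dvq map[string]int64) bool {
	its, inInsert := ivq[uuid]
	if !inInsert {
		return false // never inserted (or already popped)
	}
	dts, inDelete := dvq[uuid]
	if !inDelete {
		return true // inserted and never deleted
	}
	return newer(its, dts) // both present: the newer operation wins
}

func main() {
	ivq := map[string]int64{"a": 200, "b": 100}
	dvq := map[string]int64{"a": 100, "b": 100}
	fmt.Println(live("a", ivq, dvq)) // true: insert is newer than delete
	fmt.Println(live("b", ivq, dvq)) // false: equal timestamps favour the delete
	fmt.Println(live("c", ivq, dvq)) // false: unknown uuid
}
```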
-func (v *vqueue) IVExists(uuid string) bool { - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return false - } - didx, ok := v.dl.Load(uuid) - if !ok { - // data not in the delete queue but exists in insert queue then return exists(true) - return true +func (v *vqueue) IVExists(uuid string) (its int64, ok bool) { + _, its, _, ok = v.getVector(uuid, false) + if !ok || its == 0 { + return 0, false } - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - // However, if insert and delete are sent by the update instruction, the timestamp will be the same - return didx.date <= idx.date + return its, true } -// DVExists returns true if there is the UUID in the delete queue. +// DVExists returns timestamp of dv and true if there is the UUID in the delete queue. // If the same UUID exists in the insert queue and the delete queue, the timestamp is compared. // And the true is returned if the timestamp in the delete queue is newer than the insert queue. -func (v *vqueue) DVExists(uuid string) bool { - didx, ok := v.dl.Load(uuid) - if !ok { - return false - } - idx, ok := v.il.Load(uuid) - if !ok { - // data not in the insert queue then return not exists(false) - return true +func (v *vqueue) DVExists(uuid string) (dts int64, ok bool) { + _, _, dts, ok = v.getVector(uuid, true) + if ok || dts == 0 { + return 0, false } - - // data exists both queue, compare data timestamp if insert queue timestamp is newer than delete one, this function returns exists(true) - return didx.date > idx.date + return dts, true } func (v *vqueue) RangePopInsert( - ctx context.Context, now int64, f func(uuid string, vector []float32, date int64) bool, + ctx context.Context, now int64, f func(uuid string, vector []float32, timestamp int64) bool, ) { uii := make([]index, 0, atomic.LoadUint64(&v.ic)) defer func() { uii = nil }() v.il.Range(func(uuid string, idx *index) bool { - if idx.date > now { + if newer(idx.timestamp, now) { return true } - didx, ok := v.dl.Load(uuid) - if ok { - if idx.date < didx.date { - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) - } + dts, ok := v.loadDVQ(uuid) + if ok && newer(dts, idx.timestamp) { + _, _, _ = v.PopInsert(uuid) return true } uii = append(uii, *idx) @@ -201,14 +232,14 @@ func (v *vqueue) RangePopInsert( return true }) slices.SortFunc(uii, func(left, right index) int { - return cmp.Compare(right.date, left.date) + return cmp.Compare(right.timestamp, left.timestamp) }) for _, idx := range uii { - if !f(idx.uuid, idx.vector, idx.date) { + if !f(idx.uuid, idx.vector, idx.timestamp) { return } - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) + + _, _, _ = v.PopInsert(idx.uuid) select { case <-ctx.Done(): return @@ -223,7 +254,7 @@ func (v *vqueue) RangePopDelete(ctx context.Context, now int64, f func(uuid stri udi = nil }() v.dl.Range(func(_ string, idx *index) bool { - if idx.date > now { + if newer(idx.timestamp, now) { return true } udi = append(udi, *idx) @@ -235,18 +266,16 @@ func (v *vqueue) RangePopDelete(ctx context.Context, now int64, f func(uuid stri return true }) slices.SortFunc(udi, func(left, right index) int { - return cmp.Compare(right.date, left.date) + return cmp.Compare(right.timestamp, left.timestamp) }) - for _, idx := range udi { - if !f(idx.uuid) { + for _, didx := range udi { + if !f(didx.uuid) { return } - v.dl.Delete(idx.uuid) - atomic.AddUint64(&v.dc, ^uint64(0)) - iidx, ok := 
v.il.Load(idx.uuid) - if ok && idx.date > iidx.date { - v.il.Delete(idx.uuid) - atomic.AddUint64(&v.ic, ^uint64(0)) + _, _ = v.PopDelete(didx.uuid) + _, its, ok := v.loadIVQ(didx.uuid) + if ok && newer(didx.timestamp, its) { + _, _, _ = v.PopInsert(didx.uuid) } select { case <-ctx.Done(): @@ -263,9 +292,9 @@ func (v *vqueue) Range(_ context.Context, f func(uuid string, vector []float32, if idx == nil { return true } - didx, ok := v.dl.Load(uuid) - if !ok || (didx != nil && idx.date > didx.date) { - return f(uuid, idx.vector, idx.date) + dts, ok := v.loadDVQ(uuid) + if !ok || newer(idx.timestamp, dts) { + return f(uuid, idx.vector, idx.timestamp) } return true }) @@ -280,3 +309,25 @@ func (v *vqueue) IVQLen() (l int) { func (v *vqueue) DVQLen() (l int) { return int(atomic.LoadUint64(&v.dc)) } + +func (v *vqueue) loadIVQ(uuid string) (vec []float32, ts int64, ok bool) { + var idx *index + idx, ok = v.il.Load(uuid) + if !ok || idx == nil { + return nil, 0, false + } + return idx.vector, idx.timestamp, true +} + +func (v *vqueue) loadDVQ(uuid string) (ts int64, ok bool) { + var idx *index + idx, ok = v.dl.Load(uuid) + if !ok || idx == nil { + return 0, false + } + return idx.timestamp, true +} + +func newer(ts1, ts2 int64) bool { + return ts1 > ts2 +} diff --git a/pkg/agent/internal/vqueue/queue_test.go b/pkg/agent/internal/vqueue/queue_test.go index 3e3369afed..f67953ae10 100644 --- a/pkg/agent/internal/vqueue/queue_test.go +++ b/pkg/agent/internal/vqueue/queue_test.go @@ -172,9 +172,9 @@ func TestGetVector(t *testing.T) { // // func Test_vqueue_PushInsert(t *testing.T) { // type args struct { -// uuid string -// vector []float32 -// date int64 +// uuid string +// vector []float32 +// timestamp int64 // } // type fields struct { // il sync.Map[string, *index] @@ -208,7 +208,7 @@ func TestGetVector(t *testing.T) { // args: args { // uuid:"", // vector:nil, -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -235,7 +235,7 @@ func TestGetVector(t *testing.T) { // args: args { // uuid:"", // vector:nil, -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -278,7 +278,7 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// err := v.PushInsert(test.args.uuid, test.args.vector, test.args.date) +// err := v.PushInsert(test.args.uuid, test.args.vector, test.args.timestamp) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -288,8 +288,8 @@ func TestGetVector(t *testing.T) { // // func Test_vqueue_PushDelete(t *testing.T) { // type args struct { -// uuid string -// date int64 +// uuid string +// timestamp int64 // } // type fields struct { // il sync.Map[string, *index] @@ -322,7 +322,7 @@ func TestGetVector(t *testing.T) { // name: "test_case_1", // args: args { // uuid:"", -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -348,7 +348,7 @@ func TestGetVector(t *testing.T) { // name: "test_case_2", // args: args { // uuid:"", -// date:0, +// timestamp:0, // }, // fields: fields { // il:nil, @@ -391,7 +391,7 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// err := v.PushDelete(test.args.uuid, test.args.date) +// err := v.PushDelete(test.args.uuid, test.args.timestamp) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -399,7 +399,7 @@ func TestGetVector(t *testing.T) { // } // } // -// func Test_vqueue_GetVector(t *testing.T) { +// func Test_vqueue_PopInsert(t *testing.T) { // type args struct { // uuid string // } @@ -410,9 
+410,9 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// wantVec []float32 +// wantVector []float32 // wantTimestamp int64 -// wantExists bool +// wantOk bool // } // type test struct { // name string @@ -423,15 +423,15 @@ func TestGetVector(t *testing.T) { // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, gotExists bool) error { -// if !reflect.DeepEqual(gotVec, w.wantVec) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// defaultCheckFunc := func(w want, gotVector []float32, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVector, w.wantVector) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVector, w.wantVector) // } // if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) // } -// if !reflect.DeepEqual(gotExists, w.wantExists) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -509,15 +509,15 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotVec, gotTimestamp, gotExists := v.GetVector(test.args.uuid) -// if err := checkFunc(test.want, gotVec, gotTimestamp, gotExists); err != nil { +// gotVector, gotTimestamp, gotOk := v.PopInsert(test.args.uuid) +// if err := checkFunc(test.want, gotVector, gotTimestamp, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_IVExists(t *testing.T) { +// func Test_vqueue_PopDelete(t *testing.T) { // type args struct { // uuid string // } @@ -528,20 +528,24 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// want bool +// wantTimestamp int64 +// wantOk bool // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotTimestamp int64, gotOk bool) error { +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -619,15 +623,15 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// got := v.IVExists(test.args.uuid) -// if err := checkFunc(test.want, got); err != nil { +// gotTimestamp, gotOk := v.PopDelete(test.args.uuid) +// if err := checkFunc(test.want, gotTimestamp, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_DVExists(t *testing.T) { +// func Test_vqueue_GetVector(t *testing.T) { // type args struct { // uuid string // } @@ -638,20 +642,28 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// want bool +// wantVec []float32 +// wantTimestamp int64 +// wantExists bool // } // type test struct { // name string // args 
args // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want, []float32, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotVec []float32, gotTimestamp int64, gotExists bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTimestamp, w.wantTimestamp) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTimestamp, w.wantTimestamp) +// } +// if !reflect.DeepEqual(gotExists, w.wantExists) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) // } // return nil // } @@ -729,19 +741,17 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// got := v.DVExists(test.args.uuid) -// if err := checkFunc(test.want, got); err != nil { +// gotVec, gotTimestamp, gotExists := v.GetVector(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTimestamp, gotExists); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_RangePopInsert(t *testing.T) { +// func Test_vqueue_GetVectorWithTimestamp(t *testing.T) { // type args struct { -// ctx context.Context -// now int64 -// f func(uuid string, vector []float32, date int64) bool +// uuid string // } // type fields struct { // il sync.Map[string, *index] @@ -749,17 +759,34 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantVec []float32 +// wantIts int64 +// wantDts int64 +// wantExists bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, []float32, int64, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotIts int64, gotDts int64, gotExists bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotExists, w.wantExists) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotExists, w.wantExists) +// } // return nil // } // tests := []test{ @@ -768,9 +795,7 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -795,9 +820,7 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -840,19 +863,18 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.RangePopInsert(test.args.ctx, test.args.now, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotVec, gotIts, gotDts, gotExists := v.GetVectorWithTimestamp(test.args.uuid) +// if err := checkFunc(test.want, 
gotVec, gotIts, gotDts, gotExists); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_RangePopDelete(t *testing.T) { +// func Test_vqueue_getVector(t *testing.T) { // type args struct { -// ctx context.Context -// now int64 -// f func(uuid string) bool +// uuid string +// enableDeleteTimestamp bool // } // type fields struct { // il sync.Map[string, *index] @@ -860,17 +882,34 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantVec []float32 +// wantIts int64 +// wantDts int64 +// wantOk bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, []float32, int64, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotVec []float32, gotIts int64, gotDts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } // return nil // } // tests := []test{ @@ -879,9 +918,8 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", +// enableDeleteTimestamp:false, // }, // fields: fields { // il:nil, @@ -906,9 +944,8 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// ctx:nil, -// now:0, -// f:nil, +// uuid:"", +// enableDeleteTimestamp:false, // }, // fields: fields { // il:nil, @@ -951,18 +988,17 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.RangePopDelete(test.args.ctx, test.args.now, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotVec, gotIts, gotDts, gotOk := v.getVector(test.args.uuid, test.args.enableDeleteTimestamp) +// if err := checkFunc(test.want, gotVec, gotIts, gotDts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_Range(t *testing.T) { +// func Test_vqueue_IVExists(t *testing.T) { // type args struct { -// in0 context.Context -// f func(uuid string, vector []float32, ts int64) bool +// uuid string // } // type fields struct { // il sync.Map[string, *index] @@ -970,17 +1006,26 @@ func TestGetVector(t *testing.T) { // ic uint64 // dc uint64 // } -// type want struct{} +// type want struct { +// wantIts int64 +// wantOk bool +// } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want) error +// checkFunc func(want, int64, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want) error { +// defaultCheckFunc := func(w want, gotIts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotIts, w.wantIts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIts, w.wantIts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } // return nil 
// } // tests := []test{ @@ -989,8 +1034,7 @@ func TestGetVector(t *testing.T) { // { // name: "test_case_1", // args: args { -// in0:nil, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -1015,8 +1059,7 @@ func TestGetVector(t *testing.T) { // return test { // name: "test_case_2", // args: args { -// in0:nil, -// f:nil, +// uuid:"", // }, // fields: fields { // il:nil, @@ -1059,15 +1102,18 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// v.Range(test.args.in0, test.args.f) -// if err := checkFunc(test.want); err != nil { +// gotIts, gotOk := v.IVExists(test.args.uuid) +// if err := checkFunc(test.want, gotIts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_IVQLen(t *testing.T) { +// func Test_vqueue_DVExists(t *testing.T) { +// type args struct { +// uuid string +// } // type fields struct { // il sync.Map[string, *index] // dl sync.Map[string, *index] @@ -1075,19 +1121,24 @@ func TestGetVector(t *testing.T) { // dc uint64 // } // type want struct { -// wantL int +// wantDts int64 +// wantOk bool // } // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, int) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotL int) error { -// if !reflect.DeepEqual(gotL, w.wantL) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// defaultCheckFunc := func(w want, gotDts int64, gotOk bool) error { +// if !reflect.DeepEqual(gotDts, w.wantDts) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDts, w.wantDts) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -1096,6 +1147,9 @@ func TestGetVector(t *testing.T) { // /* // { // name: "test_case_1", +// args: args { +// uuid:"", +// }, // fields: fields { // il:nil, // dl:nil, @@ -1104,10 +1158,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -1118,6 +1172,9 @@ func TestGetVector(t *testing.T) { // func() test { // return test { // name: "test_case_2", +// args: args { +// uuid:"", +// }, // fields: fields { // il:nil, // dl:nil, @@ -1126,10 +1183,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -1143,10 +1200,10 @@ func TestGetVector(t *testing.T) { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -1159,36 +1216,37 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotL := v.IVQLen() -// if err := checkFunc(test.want, gotL); err != nil { +// gotDts, gotOk := 
v.DVExists(test.args.uuid) +// if err := checkFunc(test.want, gotDts, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) // } // } // -// func Test_vqueue_DVQLen(t *testing.T) { +// func Test_vqueue_RangePopInsert(t *testing.T) { +// type args struct { +// ctx context.Context +// now int64 +// f func(uuid string, vector []float32, timestamp int64) bool +// } // type fields struct { // il sync.Map[string, *index] // dl sync.Map[string, *index] // ic uint64 // dc uint64 // } -// type want struct { -// wantL int -// } +// type want struct{} // type test struct { // name string +// args args // fields fields // want want -// checkFunc func(want, int) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotL int) error { -// if !reflect.DeepEqual(gotL, w.wantL) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -1196,6 +1254,11 @@ func TestGetVector(t *testing.T) { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, // fields: fields { // il:nil, // dl:nil, @@ -1204,10 +1267,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -1218,6 +1281,11 @@ func TestGetVector(t *testing.T) { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, // fields: fields { // il:nil, // dl:nil, @@ -1226,10 +1294,10 @@ func TestGetVector(t *testing.T) { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -1243,10 +1311,10 @@ func TestGetVector(t *testing.T) { // tt.Parallel() // defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -1259,8 +1327,747 @@ func TestGetVector(t *testing.T) { // dc: test.fields.dc, // } // -// gotL := v.DVQLen() -// if err := checkFunc(test.want, gotL); err != nil { +// v.RangePopInsert(test.args.ctx, test.args.now, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_RangePopDelete(t *testing.T) { +// type args struct { +// ctx context.Context +// now int64 +// f func(uuid string) bool +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: 
"test_case_1", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// now:0, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// v.RangePopDelete(test.args.ctx, test.args.now, test.args.f) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_Range(t *testing.T) { +// type args struct { +// in0 context.Context +// f func(uuid string, vector []float32, ts int64) bool +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// in0:nil, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// in0:nil, +// f:nil, +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// v.Range(test.args.in0, test.args.f) +// if err := checkFunc(test.want); err != nil { +// 
tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_IVQLen(t *testing.T) { +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantL int +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, int) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotL int) error { +// if !reflect.DeepEqual(gotL, w.wantL) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotL := v.IVQLen() +// if err := checkFunc(test.want, gotL); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_DVQLen(t *testing.T) { +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantL int +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, int) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, gotL int) error { +// if !reflect.DeepEqual(gotL, w.wantL) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotL, w.wantL) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if 
test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotL := v.DVQLen() +// if err := checkFunc(test.want, gotL); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_loadIVQ(t *testing.T) { +// type args struct { +// uuid string +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantVec []float32 +// wantTs int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, []float32, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotVec []float32, gotTs int64, gotOk bool) error { +// if !reflect.DeepEqual(gotVec, w.wantVec) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotVec, w.wantVec) +// } +// if !reflect.DeepEqual(gotTs, w.wantTs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTs, w.wantTs) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotVec, gotTs, gotOk := v.loadIVQ(test.args.uuid) +// if err := checkFunc(test.want, gotVec, gotTs, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_vqueue_loadDVQ(t *testing.T) { +// type args struct { +// uuid string +// } +// type fields struct { +// il sync.Map[string, *index] +// dl sync.Map[string, *index] +// ic uint64 +// dc uint64 +// } +// type want struct { +// wantTs int64 +// wantOk bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, int64, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotTs int64, gotOk bool) error 
{ +// if !reflect.DeepEqual(gotTs, w.wantTs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotTs, w.wantTs) +// } +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// uuid:"", +// }, +// fields: fields { +// il:nil, +// dl:nil, +// ic:0, +// dc:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// v := &vqueue{ +// il: test.fields.il, +// dl: test.fields.dl, +// ic: test.fields.ic, +// dc: test.fields.dc, +// } +// +// gotTs, gotOk := v.loadDVQ(test.args.uuid) +// if err := checkFunc(test.want, gotTs, gotOk); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_newer(t *testing.T) { +// type args struct { +// ts1 int64 +// ts2 int64 +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ts1:0, +// ts2:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ts1:0, +// ts2:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := newer(test.args.ts1, test.args.ts2) +// if err := checkFunc(test.want, got); err != nil 
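// A minimal usage sketch for the timestamp-based queue semantics introduced above
// (PushInsert/PushDelete now carry a timestamp instead of date, and newer(ts1, ts2)
// decides whether a pending insert survives a pending delete); the queue instance q
// and the concrete values are illustrative only, not introduced by this change:
//
//	// q := ... // an initialized vqueue
//	// _ = q.PushInsert("uuid-1", []float32{0.1, 0.2}, 100)
//	// _ = q.PushDelete("uuid-1", 200)
//	// // the delete is newer (newer(100, 200) == false), so the pending insert for
//	// // uuid-1 is expected to be superseded and not yielded by Range.
//	// _ = q.PushInsert("uuid-2", []float32{0.3, 0.4}, 300)
//	// // no delete exists for uuid-2, so Range is expected to yield it with timestamp 300.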
{ // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/agent/internal/vqueue/stateful_test.go b/pkg/agent/internal/vqueue/stateful_test.go index fa4845cc88..3d35f4fce2 100644 --- a/pkg/agent/internal/vqueue/stateful_test.go +++ b/pkg/agent/internal/vqueue/stateful_test.go @@ -388,7 +388,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idA) + _, exists := q.IVExists(idA) return &resultContainer{ exists: exists, } @@ -439,7 +439,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idB) + _, exists := q.IVExists(idB) return &resultContainer{ exists: exists, } @@ -490,7 +490,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.IVExists(idC) + _, exists := q.IVExists(idC) return &resultContainer{ exists: exists, } @@ -541,7 +541,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idA) + _, exists := q.DVExists(idA) return &resultContainer{ exists: exists, } @@ -592,7 +592,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idB) + _, exists := q.DVExists(idB) return &resultContainer{ exists: exists, } @@ -643,7 +643,7 @@ var ( sy := systemUnderTest.(*qSystem) q := sy.q - exists := q.DVExists(idC) + _, exists := q.DVExists(idC) return &resultContainer{ exists: exists, } diff --git a/pkg/discoverer/k8s/handler/grpc/option.go b/pkg/discoverer/k8s/handler/grpc/option.go index b7853edf21..0a29461382 100644 --- a/pkg/discoverer/k8s/handler/grpc/option.go +++ b/pkg/discoverer/k8s/handler/grpc/option.go @@ -18,11 +18,10 @@ package grpc import ( - "os" - "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/pkg/discoverer/k8s/service" ) diff --git a/pkg/discoverer/k8s/service/discover.go b/pkg/discoverer/k8s/service/discover.go index 1d638aa6b7..9ca33e7b67 100644 --- a/pkg/discoverer/k8s/service/discover.go +++ b/pkg/discoverer/k8s/service/discover.go @@ -55,11 +55,11 @@ type discoverer struct { pods sync.Map[string, *[]pod.Pod] podMetrics sync.Map[string, mpod.Pod] services sync.Map[string, *service.Service] - podsByNode atomic.Value - podsByNamespace atomic.Value - podsByName atomic.Value - nodeByName atomic.Value - svcsByName atomic.Value + podsByNode atomic.Pointer[map[string]map[string]map[string][]*payload.Info_Pod] + podsByNamespace atomic.Pointer[map[string]map[string][]*payload.Info_Pod] + podsByName atomic.Pointer[map[string][]*payload.Info_Pod] + nodeByName atomic.Pointer[map[string]*payload.Info_Node] + svcsByName atomic.Pointer[map[string]*payload.Info_Service] ctrl k8s.Controller namespace string name string @@ -77,11 +77,18 @@ func New(selector *config.Selectors, opts ...Option) (dsc Discoverer, err error) return nil, errors.ErrOptionFailed(err, reflect.ValueOf(opt)) } } - - d.podsByNode.Store(make(map[string]map[string]map[string][]*payload.Info_Pod)) - d.podsByNamespace.Store(make(map[string]map[string][]*payload.Info_Pod)) - d.podsByName.Store(make(map[string][]*payload.Info_Pod)) - d.nodeByName.Store(make(map[string]*payload.Info_Node)) + var ( + podsByNode = make(map[string]map[string]map[string][]*payload.Info_Pod) // map[node][namespace][name][]pod + podsByNamespace = make(map[string]map[string][]*payload.Info_Pod) // map[namespace][name][]pod + podsByName = make(map[string][]*payload.Info_Pod) // map[name][]pod + nodeByName = make(map[string]*payload.Info_Node) // map[name]node + svcsByName = make(map[string]*payload.Info_Service) // 
map[name]svc + ) + d.podsByNode.Store(&podsByNode) + d.podsByNamespace.Store(&podsByNamespace) + d.podsByName.Store(&podsByName) + d.nodeByName.Store(&nodeByName) + d.svcsByName.Store(&svcsByName) var k8sOpts []k8s.Option k8sOpts = append(k8sOpts, @@ -365,7 +372,7 @@ func (d *discoverer) Start(ctx context.Context) (<-chan error, error) { return true } }) - d.svcsByName.Store(svcsByName) + d.svcsByName.Store(&svcsByName) var wg sync.WaitGroup wg.Add(1) @@ -407,8 +414,8 @@ func (d *discoverer) Start(ctx context.Context) (<-chan error, error) { nodeByName[nodeName].GetPods().Pods = p } } - d.nodeByName.Store(nodeByName) - d.podsByNode.Store(podsByNode) + d.nodeByName.Store(&nodeByName) + d.podsByNode.Store(&podsByNode) return nil })) wg.Add(1) @@ -422,7 +429,7 @@ func (d *discoverer) Start(ctx context.Context) (<-chan error, error) { podsByNamespace[namespace][appName] = p } } - d.podsByNamespace.Store(podsByNamespace) + d.podsByNamespace.Store(&podsByNamespace) return nil })) wg.Add(1) @@ -434,7 +441,7 @@ func (d *discoverer) Start(ctx context.Context) (<-chan error, error) { }) podsByName[appName] = p } - d.podsByName.Store(podsByName) + d.podsByName.Store(&podsByName) return nil })) wg.Wait() @@ -456,8 +463,8 @@ func (d *discoverer) GetPods(req *payload.Discoverer_Request) (pods *payload.Inf ) pods = new(payload.Info_Pods) if req.GetNode() != "" && req.GetNode() != "*" { - pbn, ok := d.podsByNode.Load().(map[string]map[string]map[string][]*payload.Info_Pod) - if !ok { + pbn := *d.podsByNode.Load() + if pbn == nil { return nil, errors.ErrInvalidDiscoveryCache } podsByNamespace, ok = pbn[req.GetNode()] @@ -467,8 +474,8 @@ func (d *discoverer) GetPods(req *payload.Discoverer_Request) (pods *payload.Inf } if req.GetNamespace() != "" && req.GetNamespace() != "*" { if podsByNamespace == nil { - podsByNamespace, ok = d.podsByNamespace.Load().(map[string]map[string][]*payload.Info_Pod) - if !ok { + podsByNamespace = *d.podsByNamespace.Load() + if podsByNamespace == nil { return nil, errors.ErrInvalidDiscoveryCache } } @@ -486,8 +493,8 @@ func (d *discoverer) GetPods(req *payload.Discoverer_Request) (pods *payload.Inf } } } else { - podsByName, ok = d.podsByName.Load().(map[string][]*payload.Info_Pod) - if !ok { + podsByName = *d.podsByName.Load() + if podsByName == nil { return nil, errors.ErrInvalidDiscoveryCache } } @@ -507,15 +514,17 @@ func (d *discoverer) GetPods(req *payload.Discoverer_Request) (pods *payload.Inf pods.GetPods()[i].GetNode().Pods = nil } } + slices.SortFunc(pods.Pods, func(left, right *payload.Info_Pod) int { + return cmp.Compare(left.GetMemory().GetUsage(), right.GetMemory().GetUsage()) + }) return pods, nil } func (d *discoverer) GetNodes( req *payload.Discoverer_Request, ) (nodes *payload.Info_Nodes, err error) { - nodes = new(payload.Info_Nodes) - nbn, ok := d.nodeByName.Load().(map[string]*payload.Info_Node) - if !ok { + nbn := *d.nodeByName.Load() + if nbn == nil { return nil, errors.ErrInvalidDiscoveryCache } if req.GetNode() != "" && req.GetNode() != "*" { @@ -527,10 +536,15 @@ func (d *discoverer) GetNodes( if err == nil { n.Pods = ps } - nodes.Nodes = append(nodes.GetNodes(), n) - return nodes, nil + return &payload.Info_Nodes{ + Nodes: []*payload.Info_Node{ + n, + }, + }, nil + } + nodes = &payload.Info_Nodes{ + Nodes: make([]*payload.Info_Node, 0, len(nbn)), } - ns := nodes.Nodes for name, n := range nbn { req.Node = name if n.GetPods() != nil { @@ -546,13 +560,11 @@ func (d *discoverer) GetNodes( n.Pods = ps } } - ns = append(ns, n) + nodes.Nodes = 
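// A minimal sketch of the atomic.Pointer-based cache swap used by the discoverer above
// (writers Store a pointer to a freshly built map, readers Load and dereference the
// snapshot); nodeCache is an illustrative name only, not part of this change:
//
//	// var nodeCache atomic.Pointer[map[string]*payload.Info_Node]
//	// m := make(map[string]*payload.Info_Node) // build a new snapshot
//	// nodeCache.Store(&m)                      // publish it atomically
//	// nbn := *nodeCache.Load()                 // readers dereference the snapshot
//	// // as in GetNodes above, a nil snapshot is treated as an invalid discovery cache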
append(nodes.Nodes, n) } - slices.SortFunc(ns, func(left, right *payload.Info_Node) int { + slices.SortFunc(nodes.Nodes, func(left, right *payload.Info_Node) int { return cmp.Compare(left.GetMemory().GetUsage(), right.GetMemory().GetUsage()) }) - - nodes.Nodes = ns return nodes, nil } @@ -561,8 +573,8 @@ func (d *discoverer) GetServices( req *payload.Discoverer_Request, ) (svcs *payload.Info_Services, err error) { svcs = new(payload.Info_Services) - sbn, ok := d.svcsByName.Load().(map[string]*payload.Info_Service) - if !ok { + sbn := *d.svcsByName.Load() + if sbn == nil { return nil, errors.ErrInvalidDiscoveryCache } diff --git a/pkg/gateway/filter/handler/grpc/handler.go b/pkg/gateway/filter/handler/grpc/handler.go index 2b23d6bdf3..0fed9e4ed8 100644 --- a/pkg/gateway/filter/handler/grpc/handler.go +++ b/pkg/gateway/filter/handler/grpc/handler.go @@ -236,27 +236,10 @@ func (s *server) MultiSearchObject( }() r, err := s.SearchObject(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, - vald.MultiSearchObjectRPCName+" API object "+string(query.GetObject())+"'s search request result not found", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get()) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -299,19 +282,10 @@ func (s *server) StreamSearchObject(stream vald.Filter_StreamSearchObjectServer) res, err := s.SearchObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -490,19 +464,10 @@ func (s *server) MultiLinearSearchObject( r, err := s.LinearSearchObject(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, - vald.MultiLinearSearchObjectRPCName+" API object "+string(query.GetObject())+"'s search request result not found", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." 
+ vald.LinearSearchObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get()) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } @@ -549,19 +514,10 @@ func (s *server) StreamLinearSearchObject(stream vald.Filter_StreamSearchObjectS res, err := s.LinearSearchObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -740,19 +696,10 @@ func (s *server) StreamInsertObject(stream vald.Filter_StreamInsertObjectServer) loc, err := s.InsertObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -808,18 +755,10 @@ func (s *server) MultiInsertObject( loc, err := s.InsertObject(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get()) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
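// The hunks in this handler repeatedly collapse status.ParseError(...) into the simpler
// status.FromError(err) + st.Message() span recording shown above. A minimal sketch of
// that shared pattern follows; recordSpanError and its trace.Span parameter type are
// assumptions for illustration, not helpers introduced by this change:
//
//	// func recordSpanError(span trace.Span, err error) {
//	// 	st, _ := status.FromError(err)
//	// 	if st != nil && span != nil {
//	// 		span.RecordError(err)
//	// 		span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...)
//	// 		span.SetStatus(trace.StatusError, err.Error())
//	// 	}
//	// }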
sspan.SetStatus(trace.StatusError, err.Error()) } @@ -997,19 +936,10 @@ func (s *server) StreamUpdateObject(stream vald.Filter_StreamUpdateObjectServer) }() loc, err := s.UpdateObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -1064,19 +994,10 @@ func (s *server) MultiUpdateObject( }() loc, err := s.UpdateObject(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, "failed to parse "+vald.UpdateObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } log.Warn(err) @@ -1262,19 +1183,10 @@ func (s *server) StreamUpsertObject(stream vald.Filter_StreamUpsertObjectServer) loc, err := s.UpsertObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -1329,19 +1241,10 @@ func (s *server) MultiUpsertObject( }() loc, err := s.UpsertObject(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetObject().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) 
+ sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -1448,10 +1351,10 @@ func (s *server) Search( } res, err = s.gateway.Search(ctx, req, s.copts...) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -1531,10 +1434,10 @@ func (s *server) SearchByID( }() res, err = s.gateway.SearchByID(ctx, req, s.copts...) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -1620,19 +1523,10 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }() res, err := s.Search(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamSearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -1676,27 +1570,10 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }() res, err := s.SearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -1750,27 +1627,10 @@ func (s *server) MultiSearch( }() r, err := s.Search(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, "failed to parse "+vald.SearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -1822,27 +1682,10 @@ func (s *server) MultiSearchByID( }() r, err := s.SearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -2099,27 +1942,10 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }() res, err := s.LinearSearch(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -2168,19 +1994,10 @@ func (s *server) StreamLinearSearchByID( }() res, err := s.LinearSearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -2196,11 +2013,10 @@ func (s *server) StreamLinearSearchByID( }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamLinearSearchRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } log.Error(err) @@ -2236,19 +2052,10 @@ func (s *server) MultiLinearSearch( }() r, err := s.LinearSearch(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -2300,19 +2107,10 @@ func (s *server) MultiLinearSearchByID( }() r, err := s.LinearSearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -2496,28 +2294,10 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }() res, err := s.Insert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2571,27 +2351,10 @@ func (s *server) MultiInsert( }() r, err := s.Insert(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -2769,19 +2532,10 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }() res, err := s.Update(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2829,27 +2583,10 @@ func (s *server) MultiUpdate( }() r, err := s.Update(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "vectorizer targets", - Description: err.Error(), - }, - }, - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -3028,19 +2765,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }() res, err := s.Upsert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -3056,10 +2784,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamUpsertRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } log.Error(err) @@ -3096,19 +2824,10 @@ func (s *server) MultiUpsert( r, err := s.Upsert(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } mu.Lock() @@ -3195,19 +2914,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }() res, err := s.Remove(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.RemoveRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetId().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -3223,10 +2933,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamRemoveRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } log.Error(err) @@ -3262,19 +2972,10 @@ func (s *server) MultiRemove( }() r, err := s.Remove(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.NotFound, - fmt.Sprintf(vald.MultiRemoveRPCName+" API ID = %v not found", query.GetId().GetId()), - &errdetails.RequestInfo{ - RequestId: query.GetId().GetId(), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get()) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } @@ -3459,19 +3160,10 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }() res, err := s.GetObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.GetObjectRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetId().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.GetObjectRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, info.Get(), - ) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamVector{ diff --git a/pkg/gateway/filter/handler/grpc/option.go b/pkg/gateway/filter/handler/grpc/option.go index a2fbfda593..8eb5d6e7da 100644 --- a/pkg/gateway/filter/handler/grpc/option.go +++ b/pkg/gateway/filter/handler/grpc/option.go @@ -18,7 +18,6 @@ package grpc import ( - "os" "runtime" "github.com/vdaas/vald/internal/client/v1/client/filter/egress" @@ -26,6 +25,7 @@ import ( "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/internal/sync/errgroup" ) diff --git a/pkg/gateway/internal/location/location_test.go b/pkg/gateway/internal/location/location_test.go index 80f8ee9d02..e69de29bb2 100644 --- a/pkg/gateway/internal/location/location_test.go +++ b/pkg/gateway/internal/location/location_test.go @@ -1,14 +0,0 @@ -// Copyright (C) 2019-2025 vdaas.org vald team -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// You may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// https://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -package location diff --git a/pkg/gateway/lb/handler/grpc/aggregation.go b/pkg/gateway/lb/handler/grpc/aggregation.go index cd58c6200b..24ad0925cd 100644 --- a/pkg/gateway/lb/handler/grpc/aggregation.go +++ b/pkg/gateway/lb/handler/grpc/aggregation.go @@ -31,6 +31,7 @@ import ( "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/observability/attribute" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/pkg/gateway/lb/service" @@ -56,7 +57,7 @@ func (s *server) aggregationSearch( f func(ctx context.Context, fcfg *payload.Search_Config, // Forwarding Config to Agent vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error), -) (res *payload.Search_Response, err error) { +) (res *payload.Search_Response, attrs []attribute.KeyValue, err error) { ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "aggregationSearch"), apiName+"/aggregationSearch") defer func() { if span != nil { @@ -235,12 +236,13 @@ func (s *server) aggregationSearch( ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1.search", ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }) + attrs = trace.StatusCodeInternal(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeInternal(err.Error())...) + span.SetAttributes(attrs...) 
span.SetStatus(trace.StatusError, err.Error()) } - return nil, err + return nil, attrs, err } res = aggr.Result() if num != 0 && len(res.GetResults()) > num { @@ -261,12 +263,13 @@ func (s *server) aggregationSearch( ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }, info.Get(), ) + attrs = trace.StatusCodeDeadlineExceeded(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeDeadlineExceeded(err.Error())...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } - return nil, err + return nil, attrs, err } if 0 < min && len(res.GetResults()) < min { err = status.WrapWithDeadlineExceeded( @@ -281,12 +284,13 @@ func (s *server) aggregationSearch( ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }, info.Get(), ) + attrs = trace.StatusCodeDeadlineExceeded(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeDeadlineExceeded(err.Error())...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } - return nil, err + return nil, attrs, err } } @@ -301,14 +305,15 @@ func (s *server) aggregationSearch( ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1.search", ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }, info.Get()) + attrs = trace.FromGRPCStatus(st.Code(), msg) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } log.Warn(err) if len(res.GetResults()) == 0 { - return nil, err + return nil, attrs, err } } if num != 0 && len(res.GetResults()) == 0 { @@ -324,21 +329,23 @@ func (s *server) aggregationSearch( ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1.search", ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }, info.Get()) + attrs = trace.StatusCodeNotFound(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } - return nil, err + return nil, attrs, err } if 0 < min && len(res.GetResults()) < min { if err == nil { err = errors.ErrInsuffcientSearchResult } + attrs = trace.StatusCodeNotFound(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } err = status.WrapWithNotFound( @@ -353,15 +360,16 @@ func (s *server) aggregationSearch( ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), }, info.Get(), ) + attrs = trace.StatusCodeNotFound(err.Error()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } - return nil, err + return nil, attrs, err } res.RequestId = bcfg.GetRequestId() - return res, nil + return res, attrs, nil } // vald standard algorithm. 
diff --git a/pkg/gateway/lb/handler/grpc/handler.go b/pkg/gateway/lb/handler/grpc/handler.go index 0ff41f9d55..ca21f84083 100644 --- a/pkg/gateway/lb/handler/grpc/handler.go +++ b/pkg/gateway/lb/handler/grpc/handler.go @@ -20,7 +20,6 @@ package grpc import ( "context" "fmt" - "io" "math" "slices" "strconv" @@ -33,12 +32,14 @@ import ( "github.com/vdaas/vald/internal/core/algorithm" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/info" + "github.com/vdaas/vald/internal/io" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/codes" "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/observability/attribute" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/strings" @@ -254,11 +255,19 @@ func (s *server) Exists( attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( - st *status.Status - msg string + st *status.Status + code codes.Code + msg string ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.ExistsRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ = status.FromError(err) + if st != nil { + code = st.Code() + msg = uuid + "'s object id:" + vald.ExistsRPCName + " API uuid " + uuid + "'s request returned error\t" + st.String() + } else { + code = codes.Unknown + msg = uuid + "'s object id:" + vald.ExistsRPCName + " API uuid " + uuid + "'s request returned error" + } + attrs = trace.FromGRPCStatus(code, msg) } log.Debug(err) if span != nil { @@ -301,23 +310,14 @@ func (s *server) Search( } return nil, err } - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + res, attrs, err := s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.Search(ctx, req, copts...) }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.SearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.SearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) - if span != nil { + if attrs != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) 
span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -361,68 +361,30 @@ func (s *server) SearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.SearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) - span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, _ := status.FromError(err) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's SearchByID method this operation is emergency fallback, the search quality is not same as usual SearchByID operation. - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + var attrs []attribute.KeyValue + res, attrs, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.SearchByID(ctx, req, copts...) }) if err == nil { return res, nil } - st, msg, err = status.ParseError(err, codes.Internal, vald.SearchByIDRPCName+" API failed to process search request", reqInfo, resInfo) - if span != nil { + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) 
span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -432,18 +394,15 @@ func (s *server) SearchByID( Config: req.GetConfig(), }) if err != nil { - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + var attrs []attribute.KeyValue + res, attrs, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.SearchByID(ctx, req, copts...) }) - if err == nil { - return res, nil - } if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, vald.SearchByIDRPCName+" API failed to process search request", reqInfo, resInfo) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -471,7 +430,7 @@ func (s *server) doSearch( ctx context.Context, cfg *payload.Search_Config, f func(ctx context.Context, cfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error), -) (res *payload.Search_Response, err error) { +) (res *payload.Search_Response, attrs []attribute.KeyValue, err error) { ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "doSearch"), apiName+"/doSearch") defer func() { if span != nil { @@ -527,10 +486,10 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }() res, err := s.Search(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -546,11 +505,10 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamSearchRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -575,10 +533,10 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }() res, err := s.SearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -594,11 +552,10 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamSearchByIDRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -636,14 +593,10 @@ func (s *server) MultiSearch( }() r, err := s.Search(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -663,21 +616,13 @@ func (s *server) MultiSearch( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "failed to parse "+vald.MultiSearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.MultiSearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return res, err + return res, errs } return res, nil } @@ -713,14 +658,10 @@ func (s *server) MultiSearchByID( }() r, err := s.SearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -740,21 +681,13 @@ func (s *server) MultiSearchByID( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "failed to parse "+vald.MultiSearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." 
+ vald.MultiSearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return res, err + return res, errs } return res, nil } @@ -791,23 +724,14 @@ func (s *server) LinearSearch( } return nil, err } - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + res, attrs, err := s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.LinearSearch(ctx, req, copts...) }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.LinearSearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetConfig().GetRequestId(), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.LinearSearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -851,68 +775,30 @@ func (s *server) LinearSearchByID( } return nil, err } - vec, err := s.getObject(ctx, uuid) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) if err != nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.LinearSearchByIDRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = 
trace.FromGRPCStatus(st.Code(), msg) - if st == nil || st.Code() == codes.NotFound { - err = nil - } - } - if err != nil { - if span != nil { - span.RecordError(err) - span.SetAttributes(attrs...) - span.SetStatus(trace.StatusError, err.Error()) - } - return nil, err + st, _ := status.FromError(err) + if span != nil && st != nil && st.Code() != codes.NotFound { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) } // try search by using agent's LinearSearchByID method this operation is emergency fallback, the search quality is not same as usual LinearSearchByID operation. - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + var attrs []attribute.KeyValue + res, attrs, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.LinearSearchByID(ctx, req, copts...) }) if err == nil { return res, nil } - st, msg, err = status.ParseError(err, codes.Internal, vald.LinearSearchByIDRPCName+" API failed to process search request", reqInfo, resInfo) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -922,18 +808,15 @@ func (s *server) LinearSearchByID( Config: req.GetConfig(), }) if err != nil { - res, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { + var attrs []attribute.KeyValue + res, attrs, err = s.doSearch(ctx, req.GetConfig(), func(ctx context.Context, fcfg *payload.Search_Config, vc vald.Client, copts ...grpc.CallOption) (*payload.Search_Response, error) { req.Config = fcfg return vc.LinearSearchByID(ctx, req, copts...) }) - if err == nil { - return res, nil - } if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, vald.LinearSearchByIDRPCName+" API failed to process search request", reqInfo, resInfo) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(attrs...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -959,10 +842,10 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }() res, err := s.LinearSearch(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -978,11 +861,10 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamLinearSearchRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) 
+ span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -1012,10 +894,10 @@ func (s *server) StreamLinearSearchByID( }() res, err := s.LinearSearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -1031,11 +913,10 @@ func (s *server) StreamLinearSearchByID( }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamLinearSearchByIDRPCName+" gRPC error response") + st, _ := status.FromError(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -1073,14 +954,10 @@ func (s *server) MultiLinearSearch( }() r, err := s.LinearSearch(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -1100,21 +977,13 @@ func (s *server) MultiLinearSearch( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "failed to parse "+vald.MultiLinearSearchRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.MultiLinearSearchRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }) - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return res, err + return res, errs } return res, nil } @@ -1150,14 +1019,10 @@ func (s *server) MultiLinearSearchByID( }() r, err := s.LinearSearchByID(ctx, query) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: query.GetConfig().GetRequestId(), - ServingData: errdetails.Serialize(query), - }) + st, _ := status.FromError(err) if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -1177,21 +1042,13 @@ func (s *server) MultiLinearSearchByID( } wg.Wait() if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "failed to parse "+vald.MultiLinearSearchByIDRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: strings.Join(rids, ","), - ServingData: errdetails.Serialize(reqs), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.MultiLinearSearchByIDRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }) + st, _ := status.FromError(errs) if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - return res, err + return res, errs } return res, nil } @@ -1397,19 +1254,10 @@ func (s *server) Insert( } } if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, - "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: uuid, - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertRPCName + ".DoMulti", - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) - if span != nil { + st, _ := status.FromError(errs) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -1436,10 +1284,10 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }() res, err := s.Insert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -1455,10 +1303,10 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamInsertRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -1497,10 +1345,10 @@ func (s *server) MultiInsert( }() res, err := s.Insert(ectx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -1569,10 +1417,10 @@ func (s *server) MultiInsert( } if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "error detected"+vald.MultiInsertRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } errs = err @@ -1778,62 +1626,28 @@ func (s *server) Update( } if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - if err != nil || vec == nil { - var ( - attrs trace.Attributes - st *status.Status - msg string - ) - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = status.WrapWithNotFound(vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s object not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeNotFound(err.Error()) - default: - code := codes.Unknown - if err == nil { - err = errors.ErrObjectIDNotFound(uuid) - code = codes.NotFound - } - st, msg, err = status.ParseError(err, code, vald.GetObjectRPCName+" API for "+vald.UpdateRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - } - if span != nil { + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, _ := status.FromError(err) + if span != nil && st != nil { span.RecordError(err) - span.SetAttributes(attrs...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } return nil, err } if conv.F32stos(vec.GetVector()) == conv.F32stos(req.GetVector().GetVector()) { - if err == nil { - err = errors.ErrSameVectorAlreadyExists(uuid, vec.GetVector(), req.GetVector().GetVector()) + if vec.GetTimestamp() < req.GetVector().GetTimestamp() { + return s.UpdateTimestamp(ctx, &payload.Update_TimestampRequest{ + Id: uuid, + Timestamp: req.GetVector().GetTimestamp(), + }) } + err = errors.ErrSameVectorAlreadyExists(uuid, vec.GetVector(), req.GetVector().GetVector()) st, msg, err := status.ParseError(err, codes.AlreadyExists, "error "+vald.UpdateRPCName+" API ID = "+uuid+"'s same vector data already exists", &errdetails.RequestInfo{ @@ -1858,7 +1672,7 @@ func (s *server) Update( } } var now int64 - if req.GetConfig().GetTimestamp() != 0 { + if req.GetConfig().GetTimestamp() > 0 { now = req.GetConfig().GetTimestamp() } else { now = time.Now().UnixNano() @@ -1893,20 +1707,10 @@ func (s *server) Update( return nil, err } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveRPCName+" for "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: uuid, - ServingData: errdetails.Serialize(rreq), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName + "." + vald.RemoveRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) - - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -1939,19 +1743,10 @@ func (s *server) Update( } return nil, err } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.InsertRPCName+" for "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: uuid, - ServingData: errdetails.Serialize(ireq), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName + "." + vald.InsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -1976,10 +1771,10 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }() res, err := s.Update(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -1995,10 +1790,10 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamUpdateRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2037,10 +1832,10 @@ func (s *server) MultiUpdate( }() res, err := s.Update(ectx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2109,16 +1904,229 @@ func (s *server) MultiUpdate( } if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "error detected"+vald.MultiUpdateRPCName+" gRPC error response") + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) + } + } + + return locs, errs +} + +func (s *server) UpdateTimestamp( + ctx context.Context, req *payload.Update_TimestampRequest, +) (res *payload.Object_Location, err error) { + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateTimestampRPCName), apiName+"/"+vald.UpdateTimestampRPCName) + defer func() { + if span != nil { + span.End() + } + }() + uuid := req.GetId() + reqInfo := &errdetails.RequestInfo{ + RequestId: uuid, + ServingData: errdetails.Serialize(req), + } + resInfo := &errdetails.ResourceInfo{ + ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateTimestampRPCName + "." + vald.GetObjectRPCName, + ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), + } + if len(uuid) == 0 { + err = errors.ErrInvalidMetaDataConfig + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid uuid", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "invalid id", + Description: err.Error(), + }, + }, + }) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) span.SetStatus(trace.StatusError, err.Error()) } - errs = err + return nil, err } + ts := req.GetTimestamp() + if ts < 0 { + err = errors.ErrInvalidTimestamp(ts) + err = status.WrapWithInvalidArgument(vald.UpdateTimestampRPCName+" API invalid vector argument", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "timestamp", + Description: err.Error(), + }, + }, + }, info.Get()) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeInvalidArgument(err.Error())...) 
+ span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + var ( + mu sync.RWMutex + aeCount atomic.Uint64 + updated atomic.Uint64 + ls = make([]string, 0, s.replica) + visited = make(map[string]bool, s.replica) + locs = &payload.Object_Location{ + Uuid: uuid, + Ips: make([]string, 0, s.replica), + } + ) + err = s.gateway.BroadCast(ctx, service.WRITE, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "BroadCast/"+target), apiName+"/"+vald.UpdateTimestampRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.UpdateTimestamp(ctx, req, copts...) + if err != nil { + st, ok := status.FromError(err) + if ok && st != nil { + if st.Code() != codes.AlreadyExists && + st.Code() != codes.Canceled && + st.Code() != codes.DeadlineExceeded && + st.Code() != codes.InvalidArgument && + st.Code() != codes.NotFound && + st.Code() != codes.OK && + st.Code() != codes.Unimplemented { + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("UpdateTimestamp operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if st.Code() == codes.AlreadyExists { + host, _, err := net.SplitHostPort(target) + if err != nil { + host = target + } + aeCount.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), host) + ls = append(ls, host) + mu.Unlock() - return locs, errs + } + } + return nil + } + if loc != nil { + updated.Add(1) + mu.Lock() + visited[target] = true + locs.Ips = append(locs.GetIps(), loc.GetIps()...) + ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + switch { + case err != nil: + st, _ := status.FromError(err) + if st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case len(locs.Ips) <= 0: + err = errors.ErrIndexNotFound + err = status.WrapWithNotFound(vald.UpdateTimestampRPCName+" API update target not found", err, reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeNotFound(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + case updated.Load()+aeCount.Load() < uint64(s.replica): + shortage := s.replica - int(updated.Load()+aeCount.Load()) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + if err != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + + err = s.gateway.DoMulti(ctx, shortage, func(ctx context.Context, target string, vc vald.Client, copts ...grpc.CallOption) (err error) { + mu.RLock() + tf, ok := visited[target] + mu.RUnlock() + if tf && ok { + return errors.Errorf("target: %s already inserted will skip", target) + } + ctx, span := trace.StartSpan(grpc.WrapGRPCMethod(ctx, "DoMulti/"+target), apiName+"/"+vald.InsertRPCName+"/"+target) + defer func() { + if span != nil { + span.End() + } + }() + loc, err := vc.Insert(ctx, &payload.Insert_Request{ + Vector: vec, + Config: &payload.Insert_Config{ + SkipStrictExistCheck: true, + Timestamp: ts, + }, + }, copts...) 
+ if err != nil { + st, ok := status.FromError(err) + if ok && st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), fmt.Sprintf("Shortage index Insert for Update operation for Agent %s failed,\terror: %v", target, err))...) + span.SetStatus(trace.StatusError, err.Error()) + } + return err + } + if loc != nil { + updated.Add(1) + mu.Lock() + locs.Ips = append(locs.GetIps(), loc.GetIps()...) + ls = append(ls, loc.GetName()) + mu.Unlock() + } + return nil + }) + if err != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { + span.RecordError(err) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + } + case updated.Load() == 0 && aeCount.Load() > 0: + err = status.WrapWithAlreadyExists(vald.UpdateTimestampRPCName+" API update target same vector already exists", errors.ErrSameVectorAlreadyExists(uuid, nil, nil), reqInfo, resInfo) + if span != nil { + span.RecordError(err) + span.SetAttributes(trace.StatusCodeAlreadyExists(err.Error())...) + span.SetStatus(trace.StatusError, err.Error()) + } + return nil, err + + } + slices.Sort(ls) + locs.Name = strings.Join(ls, ",") + return locs, nil } func (s *server) Upsert( @@ -2180,48 +2188,19 @@ func (s *server) Upsert( } var shouldInsert bool if !req.GetConfig().GetSkipStrictExistCheck() { - vec, err := s.getObject(ctx, uuid) - var ( - attrs trace.Attributes - st *status.Status - msg string - ) + vec, err := s.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: uuid, + }, + }) + var attrs trace.Attributes if err != nil || vec == nil { - switch { - case errors.Is(err, errors.ErrInvalidUUID(uuid)): - err = status.WrapWithInvalidArgument( - vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API invalid argument for uuid \""+uuid+"\" detected", - err, - reqInfo, - resInfo, - &errdetails.BadRequest{ - FieldViolations: []*errdetails.BadRequestFieldViolation{ - { - Field: "uuid", - Description: err.Error(), - }, - }, - }, - ) - attrs = trace.StatusCodeInvalidArgument(err.Error()) - case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): - err = status.WrapWithInternal(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API connection not found", err, reqInfo, resInfo) - attrs = trace.StatusCodeInternal(err.Error()) - case errors.Is(err, context.Canceled): - err = status.WrapWithCanceled(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API canceled", err, reqInfo, resInfo) - attrs = trace.StatusCodeCancelled(err.Error()) - case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API deadline exceeded", err, reqInfo, resInfo) - attrs = trace.StatusCodeDeadlineExceeded(err.Error()) - case errors.Is(err, errors.ErrObjectIDNotFound(uuid)), errors.Is(err, errors.ErrObjectNotFound(nil, uuid)): - err = nil - shouldInsert = true - default: - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API for "+vald.UpsertRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) - if st != nil && st.Code() == codes.NotFound { - err = nil + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + if st.Code() == codes.NotFound { shouldInsert = true + err = nil } } } else if conv.F32stos(vec.GetVector()) == conv.F32stos(req.GetVector().GetVector()) { @@ -2236,7 
+2215,6 @@ func (s *server) Upsert( } return nil, err } - } else { id, err := s.exists(ctx, uuid) if err != nil { @@ -2291,6 +2269,8 @@ func (s *server) Upsert( } if err != nil { + // Should we use `status.FromError(err)` instead of `status.PraseError(err)` ? + // It seems `operation` has an important role for tracing, so I don't refactor for now. st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+operation+" for "+vald.UpsertRPCName+" gRPC error response", &errdetails.RequestInfo{ @@ -2328,10 +2308,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }() res, err := s.Upsert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2347,10 +2327,10 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamUpsertRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2389,10 +2369,10 @@ func (s *server) MultiUpsert( }() res, err := s.Upsert(ectx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2461,10 +2441,10 @@ func (s *server) MultiUpsert( } if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "error detected"+vald.MultiUpsertRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } errs = err @@ -2527,12 +2507,10 @@ func (s *server) Remove( err = status.WrapWithDeadlineExceeded(vald.ExistsRPCName+" API for "+vald.RemoveRPCName+" API deadline exceeded", err, reqInfo, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.ExistsRPCName+" API for "+vald.RemoveRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } if err != nil { if span != nil { @@ -2594,11 +2572,10 @@ func (s *server) Remove( return nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveRPCName+" gRPC error response", reqInfo, resInfo, info.Get()) - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -2619,7 +2596,7 @@ func (s *server) Remove( } func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) { - ctx, span := trace.StartSpan(grpc.WithGRPCMethod(stream.Context(), vald.PackageName+"."+vald.SearchRPCServiceName+"/"+vald.StreamSearchRPCName), apiName+"/"+vald.StreamSearchRPCName) + ctx, span := trace.StartSpan(grpc.WithGRPCMethod(stream.Context(), vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.StreamRemoveRPCName), apiName+"/"+vald.StreamRemoveRPCName) defer func() { if span != nil { span.End() @@ -2635,10 +2612,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }() res, err := s.Remove(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.RemoveRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2654,10 +2631,10 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamRemoveRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2696,10 +2673,10 @@ func (s *server) MultiRemove( }() res, err := s.Remove(ectx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.RemoveRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2757,13 +2734,12 @@ func (s *server) MultiRemove( } if errs != nil { - st, msg, err := status.ParseError(errs, codes.Internal, "error detected"+vald.MultiRemoveRPCName+" gRPC error response") - if span != nil { - span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) - span.SetStatus(trace.StatusError, err.Error()) + st, _ := status.FromError(errs) + if st != nil && span != nil { + span.RecordError(errs) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) + span.SetStatus(trace.StatusError, errs.Error()) } - errs = err } return locs, errs @@ -2858,19 +2834,11 @@ func (s *server) RemoveByTimestamp( err = errors.Join(err, errs) } if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveByTimestampRPCName+" gRPC error response", - &errdetails.RequestInfo{ - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceName: fmt.Sprintf("%s: %s(%s)", apiName, s.name, s.ip), - }, - ) + st, _ := status.FromError(err) log.Error(err) - if span != nil { + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -3086,19 +3054,10 @@ func (s *server) Flush( return nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.FlushRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: strconv.FormatInt(now, 10), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.FlushRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %v", apiName, s.name, s.ip, s.gateway.Addrs(ctx)), - }, info.Get()) + st, _ := status.FromError(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -3184,12 +3143,10 @@ func (s *server) GetObject( err = status.WrapWithDeadlineExceeded(vald.GetObjectRPCName+" API deadline exceeded", err, reqInfo, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetObjectRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } log.Debug(err) if span != nil { @@ -3217,10 +3174,10 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }() res, err := s.GetObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.GetObjectRPCName+" gRPC error response") - if sspan != nil { + st, _ := status.FromError(err) + if st != nil && sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamVector{ @@ -3236,10 +3193,10 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }, nil }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamGetObjectRPCName+" gRPC error response") - if span != nil { + st, _ := status.FromError(err) + if st != nil && span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } @@ -3448,12 +3405,10 @@ func (s *server) IndexInfo( err = status.WrapWithDeadlineExceeded(vald.IndexInfoRPCName+" API deadline exceeded", err, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.IndexInfoRPCName+" API request returned error", resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } log.Debug(err) if span != nil { @@ -3581,12 +3536,10 @@ func (s *server) IndexDetail( err = status.WrapWithDeadlineExceeded(vald.IndexDetailRPCName+" API deadline exceeded", err, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.IndexDetailRPCName+" API request returned error", resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } log.Debug(err) if span != nil { @@ -3749,12 +3702,10 @@ func (s *server) GetTimestamp( err = status.WrapWithDeadlineExceeded(vald.GetTimestampRPCName+" API deadline exceeded", err, reqInfo, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.GetTimestampRPCName+" API uuid "+uuid+"'s request returned error", reqInfo, resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } if span != nil { span.RecordError(err) @@ -3793,12 +3744,10 @@ func (s *server) IndexStatistics( err = status.WrapWithDeadlineExceeded(vald.IndexStatisticsRPCName+" API deadline exceeded", err, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.IndexStatisticsRPCName+" API request returned error", resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } log.Debug(err) if span != nil { @@ -3920,12 +3869,10 @@ func (s *server) IndexStatisticsDetail( err = status.WrapWithDeadlineExceeded(vald.IndexStatisticsDetailRPCName+" API deadline exceeded", err, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.IndexStatisticsDetailRPCName+" API request returned error", resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } 
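The hunks above and below repeat one refactor: instead of re-wrapping the error with status.ParseError, each handler now pulls a ready-made gRPC status out of the error and attaches its code and message to the trace span. The following is a minimal editor-added sketch of that shape, for illustration only and not part of this patch; the recordSpanError name and the trace.Span parameter type are assumptions, while the status and trace packages are the ones this handler already imports.

package grpc

import (
	"github.com/vdaas/vald/internal/net/grpc/status"
	"github.com/vdaas/vald/internal/observability/trace"
)

// recordSpanError sketches the pattern used throughout this diff: derive a
// gRPC status from err and, when both the status and the span are available,
// record the status code and message on the span before marking it as errored.
func recordSpanError(span trace.Span, err error) {
	if err == nil {
		return
	}
	// The second return value of status.FromError is ignored here, matching
	// the `st, _ := status.FromError(err)` form used in the handler.
	st, _ := status.FromError(err)
	if st != nil && span != nil {
		span.RecordError(err)
		span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...)
		span.SetStatus(trace.StatusError, err.Error())
	}
}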
log.Debug(err) if span != nil { @@ -4160,12 +4107,10 @@ func (s *server) IndexProperty( err = status.WrapWithDeadlineExceeded(vald.IndexPropertyRPCName+" API deadline exceeded", err, resInfo) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Unknown, vald.IndexPropertyRPCName+" API request returned error", resInfo) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st != nil { + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) + } } log.Debug(err) if span != nil { diff --git a/pkg/gateway/lb/handler/grpc/handler_test.go b/pkg/gateway/lb/handler/grpc/handler_test.go index 1b0e662ad5..ca0ee8add4 100644 --- a/pkg/gateway/lb/handler/grpc/handler_test.go +++ b/pkg/gateway/lb/handler/grpc/handler_test.go @@ -3168,6 +3168,143 @@ package grpc // } // } // +// func Test_server_UpdateTimestamp(t *testing.T) { +// type args struct { +// ctx context.Context +// req *payload.Update_TimestampRequest +// } +// type fields struct { +// eg errgroup.Group +// gateway service.Gateway +// timeout time.Duration +// replica int +// streamConcurrency int +// multiConcurrency int +// name string +// ip string +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantRes *payload.Object_Location +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Location, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotRes *payload.Object_Location, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotRes, w.wantRes) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// req:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// eg: test.fields.eg, +// gateway: 
test.fields.gateway, +// timeout: test.fields.timeout, +// replica: test.fields.replica, +// streamConcurrency: test.fields.streamConcurrency, +// multiConcurrency: test.fields.multiConcurrency, +// name: test.fields.name, +// ip: test.fields.ip, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotRes, err := s.UpdateTimestamp(test.args.ctx, test.args.req) +// if err := checkFunc(test.want, gotRes, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_server_Upsert(t *testing.T) { // type args struct { // ctx context.Context @@ -5729,3 +5866,140 @@ package grpc // }) // } // } +// +// func Test_server_IndexProperty(t *testing.T) { +// type args struct { +// ctx context.Context +// in1 *payload.Empty +// } +// type fields struct { +// eg errgroup.Group +// gateway service.Gateway +// timeout time.Duration +// replica int +// streamConcurrency int +// multiConcurrency int +// name string +// ip string +// UnimplementedValdServer vald.UnimplementedValdServer +// } +// type want struct { +// wantDetail *payload.Info_Index_PropertyDetail +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Info_Index_PropertyDetail, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotDetail *payload.Info_Index_PropertyDetail, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotDetail, w.wantDetail) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotDetail, w.wantDetail) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// in1:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// in1:nil, +// }, +// fields: fields { +// eg:nil, +// gateway:nil, +// timeout:nil, +// replica:0, +// streamConcurrency:0, +// multiConcurrency:0, +// name:"", +// ip:"", +// UnimplementedValdServer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := &server{ +// eg: test.fields.eg, +// gateway: test.fields.gateway, +// timeout: test.fields.timeout, +// replica: test.fields.replica, +// streamConcurrency: test.fields.streamConcurrency, +// multiConcurrency: test.fields.multiConcurrency, +// name: 
test.fields.name, +// ip: test.fields.ip, +// UnimplementedValdServer: test.fields.UnimplementedValdServer, +// } +// +// gotDetail, err := s.IndexProperty(test.args.ctx, test.args.in1) +// if err := checkFunc(test.want, gotDetail, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/gateway/lb/handler/grpc/option.go b/pkg/gateway/lb/handler/grpc/option.go index 13e8f6bf01..b6b400b46b 100644 --- a/pkg/gateway/lb/handler/grpc/option.go +++ b/pkg/gateway/lb/handler/grpc/option.go @@ -18,12 +18,12 @@ package grpc import ( - "os" "runtime" "time" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/internal/timeutil" "github.com/vdaas/vald/pkg/gateway/lb/service" diff --git a/pkg/gateway/lb/service/gateway.go b/pkg/gateway/lb/service/gateway.go index 212d8f9594..a65ef2d984 100644 --- a/pkg/gateway/lb/service/gateway.go +++ b/pkg/gateway/lb/service/gateway.go @@ -24,6 +24,7 @@ import ( "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/client/v1/client/discoverer" + vc "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" @@ -94,7 +95,7 @@ func (g *gateway) BroadCast( case <-ictx.Done(): return nil default: - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } @@ -129,7 +130,7 @@ func (g *gateway) DoMulti( copts ...grpc.CallOption, ) (err error) { if atomic.LoadUint32(&cur) < limit { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) if err != nil { return err } @@ -147,7 +148,7 @@ func (g *gateway) DoMulti( if atomic.LoadUint32(&cur) < limit { _, ok := visited.Load(addr) if !ok { - err = f(ictx, addr, vald.NewValdClient(conn), copts...) + err = f(ictx, addr, vc.NewValdClient(conn), copts...) 
if err != nil { return err } diff --git a/pkg/gateway/mirror/handler/grpc/handler.go b/pkg/gateway/mirror/handler/grpc/handler.go index f03e56f6ca..df987f0159 100644 --- a/pkg/gateway/mirror/handler/grpc/handler.go +++ b/pkg/gateway/mirror/handler/grpc/handler.go @@ -16,7 +16,6 @@ package grpc import ( "context" "fmt" - "io" "reflect" "sync/atomic" @@ -24,6 +23,7 @@ import ( "github.com/vdaas/vald/apis/grpc/v1/payload" "github.com/vdaas/vald/apis/grpc/v1/vald" "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/io" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/codes" @@ -117,14 +117,18 @@ func (s *server) Register( ) attrs = trace.StatusCodeInvalidArgument(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+mirror.RegisterRPCName+" gRPC error response", reqInfo, resInfo, + err = status.WrapWithInternal( + mirror.RegisterRPCName+" API failed to connect mirror gateway targets", err, reqInfo, resInfo, + &errdetails.BadRequest{ + FieldViolations: []*errdetails.BadRequestFieldViolation{ + { + Field: "mirror gateway targets", + Description: err.Error(), + }, + }, + }, ) - attrs = trace.FromGRPCStatus(st.Code(), msg) + attrs = trace.StatusCodeInternal(err.Error()) } log.Warn(err) if span != nil { @@ -138,7 +142,7 @@ func (s *server) Register( // Get own address and the addresses of other mirror gateways to which this gateway is currently connected. tgts, err := s.mirror.MirrorTargets(ctx) if err != nil { - err = status.WrapWithInternal(mirror.RegisterRPCName+" API failed to get connected vald gateway targets", err, + err = status.WrapWithInternal(mirror.RegisterRPCName+" API failed to get connected mirror gateway targets", err, &errdetails.BadRequest{ FieldViolations: []*errdetails.BadRequestFieldViolation{ { @@ -386,10 +390,15 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }() res, err := s.Search(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Proto(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.SearchRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -406,11 +415,15 @@ func (s *server) StreamSearch(stream vald.Search_StreamSearchServer) (err error) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamSearchRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamSearchRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -436,10 +449,15 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }() res, err := s.SearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Proto(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.SearchByIDRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -456,11 +474,15 @@ func (s *server) StreamSearchByID(stream vald.Search_StreamSearchByIDServer) (er }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamSearchByIDRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamSearchByIDRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -756,10 +778,15 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }() res, err := s.LinearSearch(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.LinearSearchRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -776,11 +803,15 @@ func (s *server) StreamLinearSearch(stream vald.Search_StreamLinearSearchServer) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamLinearSearchRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamLinearSearchRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -811,10 +842,15 @@ func (s *server) StreamLinearSearchByID( }() res, err := s.LinearSearchByID(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.LinearSearchByIDRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Search_StreamResponse{ @@ -831,11 +867,15 @@ func (s *server) StreamLinearSearchByID( }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.StreamLinearSearchByIDRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamLinearSearchByIDRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -1002,22 +1042,18 @@ func (s *server) Insert( return loc, errors.Join(derr, err) }) if err != nil { - reqInfo := &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, s.vAddr), + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.InsertRPCName+" gRPC error response") } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.InsertRPCName+" gRPC error response", reqInfo, resInfo, - ) - log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } + log.Warn(err) return nil, err } log.Debugf("Insert API succeeded to %#v", loc) @@ -1058,24 +1094,16 @@ func (s *server) handleInsert( return vc.Insert(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.InsertRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -1173,7 +1201,7 @@ func (s *server) handleInsert( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST. - // And send Update API requst to ALREADY_EXIST cluster using the query requested by the user. + // And send Update API request to ALREADY_EXIST cluster using the query requested by the user. log.Warnf("failed to "+vald.InsertRPCName+" API: %#v", err) resLoc, err := s.handleInsertResult(ctx, alreadyExistsTgts, &payload.Update_Request{ @@ -1223,24 +1251,16 @@ func (s *server) handleInsertResult( return vc.Update(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName + ".DoMulti/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpdateRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -1428,10 +1448,15 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }() res, err := s.Insert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.InsertRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -1448,10 +1473,15 @@ func (s *server) StreamInsert(stream vald.Insert_StreamInsertServer) (err error) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamInsertRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamInsertRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -1494,14 +1524,15 @@ func (s *server) MultiInsert( loc, err := s.Insert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.InsertRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -1557,22 +1588,18 @@ func (s *server) Update( return loc, errors.Join(derr, err) }) if err != nil { - reqInfo := &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, s.vAddr), + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpdateRPCName+" gRPC error response") } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.UpdateRPCName+" gRPC error response", reqInfo, resInfo, - ) - log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } + log.Warn(err) return nil, err } log.Debugf("Update API succeeded to %#v", loc) @@ -1613,25 +1640,16 @@ func (s *server) handleUpdate( return vc.Update(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpdateRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpdateRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -1743,7 +1761,7 @@ func (s *server) handleUpdate( } // In this case, the status code in the result object contains only OK or ALREADY_EXIST or NOT_FOUND. - // And send Insert API requst to NOT_FOUND cluster using query requested by the user. + // And send Insert API request to NOT_FOUND cluster using query requested by the user. log.Warnf("failed to "+vald.UpdateRPCName+" API: %#v", err) resLoc, err := s.handleUpdateResult(ctx, notFoundTgts, &payload.Insert_Request{ @@ -1793,25 +1811,16 @@ func (s *server) handleUpdateResult( return vc.Insert(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.InsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.InsertRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.InsertRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) 
+ span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -2013,10 +2022,15 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }() res, err := s.Update(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Proto(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpdateRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2033,10 +2047,15 @@ func (s *server) StreamUpdate(stream vald.Update_StreamUpdateServer) (err error) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamUpdateRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamUpdateRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2079,14 +2098,15 @@ func (s *server) MultiUpdate( loc, err := s.Update(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpdateRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpdateRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2142,23 +2162,18 @@ func (s *server) Upsert( return loc, err }) if err != nil { - reqInfo := &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, s.vAddr), + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpsertRPCName+" gRPC error response") } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.UpsertRPCName+" gRPC error response", reqInfo, resInfo, - ) - log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } + log.Warn(err) return nil, err } log.Debugf("Upsert API succeeded to %#v", loc) @@ -2199,25 +2214,16 @@ func (s *server) handleUpsert( return vc.Upsert(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.UpsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.UpsertRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpsertRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -2382,7 +2388,7 @@ func (s *server) doUpsert( return loc, nil } -// StreamUpsert handles bidirectional streaming for upserting objects. +// StreamUpsert handles bidirectional streaming for upsert objects. // It wraps the bidirectional stream logic for the Upsert RPC method. // For each incoming request in the bidirectional stream, it calls the Upsert function. // The response is then sent back through the stream with the corresponding status or location information. @@ -2403,10 +2409,15 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }() res, err := s.Upsert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpsertRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2423,10 +2434,15 @@ func (s *server) StreamUpsert(stream vald.Upsert_StreamUpsertServer) (err error) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamUpsertRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamUpsertRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2469,14 +2485,15 @@ func (s *server) MultiUpsert( loc, err := s.Upsert(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.UpsertRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetVector().GetId(), - ServingData: errdetails.Serialize(req), - }) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.UpsertRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2532,22 +2549,18 @@ func (s *server) Remove( return loc, err }) if err != nil { - reqInfo := &errdetails.RequestInfo{ - RequestId: req.GetId().GetId(), - } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, s.vAddr), + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveRPCName+" gRPC error response") } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveRPCName+" gRPC error response", reqInfo, resInfo, - ) - log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } + log.Warn(err) return nil, err } log.Debugf("Remove API remove succeeded to %#v", loc) @@ -2588,24 +2601,16 @@ func (s *server) handleRemove( return vc.Remove(ctx, req, copts...) 
}) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetId().GetId(), - }, - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -2788,10 +2793,15 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }() res, err := s.Remove(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.RemoveRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamLocation{ @@ -2808,10 +2818,15 @@ func (s *server) StreamRemove(stream vald.Remove_StreamRemoveServer) (err error) }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamRemoveRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamRemoveRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -2854,14 +2869,15 @@ func (s *server) MultiRemove( loc, err := s.Remove(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.RemoveRPCName+" gRPC error response", - &errdetails.RequestInfo{ - RequestId: req.GetId().GetId(), - ServingData: errdetails.Serialize(req), - }) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } emu.Lock() @@ -2917,20 +2933,16 @@ func (s *server) RemoveByTimestamp( return locs, errors.Join(derr, err) }) if err != nil { - reqInfo := &errdetails.RequestInfo{ - ServingData: errdetails.Serialize(req), + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveByTimestampRPCName+" gRPC error response") } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveByTimestampRPCName, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, s.vAddr), - } - st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveRPCName+" gRPC error response", reqInfo, resInfo, - ) log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return nil, err @@ -2971,21 +2983,16 @@ func (s *server) handleRemoveByTimestamp( return vc.RemoveByTimestamp(ctx, req, copts...) }) if err != nil { - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+vald.RemoveByTimestampRPCName+" gRPC error response", - &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + vald.RemoveByTimestampRPCName + ".BroadCast/" + target, - ResourceName: fmt.Sprintf("%s: %s(%s) to %s", apiName, s.name, s.ip, target), - }, - ) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.RemoveByTimestampRPCName+" gRPC error response") + } log.Warn(err) if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } code = st.Code() @@ -3233,10 +3240,15 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }() res, err := s.GetObject(ctx, req) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.GetObjectRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. 
+ log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Internal, "failed to parse "+vald.GetObjectRPCName+" gRPC error response") + } if sspan != nil { sspan.RecordError(err) - sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + sspan.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) sspan.SetStatus(trace.StatusError, err.Error()) } return &payload.Object_StreamVector{ @@ -3253,10 +3265,15 @@ func (s *server) StreamGetObject(stream vald.Object_StreamGetObjectServer) (err }, ) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamGetObjectRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving ,st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamGetObjectRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) span.SetStatus(trace.StatusError, err.Error()) } return err @@ -3289,10 +3306,15 @@ func (s *server) StreamListObject( return obj, s.doStreamListObject(ctx, client, stream) }) if err != nil { - st, msg, err := status.ParseError(err, codes.Internal, "failed to parse "+vald.StreamListObjectRPCName+" gRPC error response") + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+vald.StreamListObjectRPCName+" gRPC error response") + } if span != nil { span.RecordError(err) - span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) + span.SetAttributes(trace.FromGRPCStatus(st.Code(), st.Message())...) 
span.SetStatus(trace.StatusError, err.Error()) } return err @@ -3348,7 +3370,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Recv returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Recv returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Recv returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( @@ -3387,7 +3409,7 @@ func (s *server) doStreamListObject( err = status.WrapWithCanceled("Stream Send returned canceld error at "+id, err) attr = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): - err = status.WrapWithDeadlineExceeded("Stream Send returned deadlin exceeded error at "+id, err) + err = status.WrapWithDeadlineExceeded("Stream Send returned deadline exceeded error at "+id, err) attr = trace.StatusCodeDeadlineExceeded(err.Error()) default: var ( diff --git a/pkg/gateway/mirror/handler/grpc/option.go b/pkg/gateway/mirror/handler/grpc/option.go index 408ad91c2e..0a4b18c71b 100644 --- a/pkg/gateway/mirror/handler/grpc/option.go +++ b/pkg/gateway/mirror/handler/grpc/option.go @@ -14,12 +14,12 @@ package grpc import ( - "os" "runtime" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net" + "github.com/vdaas/vald/internal/os" "github.com/vdaas/vald/internal/sync/errgroup" "github.com/vdaas/vald/pkg/gateway/mirror/service" ) diff --git a/pkg/gateway/mirror/service/gateway.go b/pkg/gateway/mirror/service/gateway.go index f6bb18fb88..8470a42550 100644 --- a/pkg/gateway/mirror/service/gateway.go +++ b/pkg/gateway/mirror/service/gateway.go @@ -22,6 +22,7 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability/trace" + "github.com/vdaas/vald/internal/strings" "github.com/vdaas/vald/internal/sync/errgroup" ) @@ -32,7 +33,6 @@ const ( // Gateway represents an interface for interacting with gRPC clients. type Gateway interface { - ForwardedContext(ctx context.Context, podName string) context.Context FromForwardedContext(ctx context.Context) string BroadCast(ctx context.Context, f func(ctx context.Context, target string, vc MirrorClient, copts ...grpc.CallOption) error) error @@ -73,9 +73,9 @@ func (g *gateway) GRPCClient() grpc.Client { return g.client.GRPCClient() } -// ForwardedContext takes a context and a podName, returning a new context +// forwardedContext takes a context and a podName, returning a new context // with additional information related to forwarding. 
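// NOTE (editor): the surrounding gateway.go hunks unexport ForwardedContext to
// forwardedContext. As a rough sketch of what that helper does, here is the
// forwarded-metadata pattern using the standard google.golang.org/grpc/metadata
// package; the key name "forwarded-for" is illustrative, not the production
// constant.
package main

import (
	"context"
	"fmt"

	"google.golang.org/grpc/metadata"
)

const forwardedContextKey = "forwarded-for"

// forwardedContext attaches the pod name to the outgoing gRPC metadata so a
// receiving mirror gateway can tell that the request was already forwarded.
func forwardedContext(ctx context.Context, podName string) context.Context {
	return metadata.AppendToOutgoingContext(ctx, forwardedContextKey, podName)
}

// fromForwardedContext reads the forwarding pod name back on the server side.
func fromForwardedContext(ctx context.Context) string {
	md, ok := metadata.FromIncomingContext(ctx)
	if !ok {
		return ""
	}
	if vals := md.Get(forwardedContextKey); len(vals) > 0 {
		return vals[0]
	}
	return ""
}

func main() {
	ctx := forwardedContext(context.Background(), "vald-mirror-gateway-0")
	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md.Get(forwardedContextKey))
}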
-func (*gateway) ForwardedContext(ctx context.Context, podName string) context.Context { +func (*gateway) forwardedContext(ctx context.Context, podName string) context.Context { return grpc.NewOutgoingContext(ctx, grpc.MD{ forwardedContextKey: []string{ podName, @@ -113,7 +113,7 @@ func (g *gateway) BroadCast( span.End() } }() - return g.client.GRPCClient().RangeConcurrent(g.ForwardedContext(ctx, g.podName), -1, func(ictx context.Context, + return g.client.GRPCClient().RangeConcurrent(g.forwardedContext(ctx, g.podName), -1, func(ictx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption, ) (err error) { select { @@ -143,11 +143,21 @@ func (g *gateway) Do( if target == "" { return nil, errors.ErrTargetNotFound } - return g.client.GRPCClient().Do(g.ForwardedContext(ctx, g.podName), target, + fctx := g.forwardedContext(ctx, g.podName) + res, err = g.client.GRPCClient().Do(fctx, target, func(ictx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (any, error) { return f(ictx, target, NewMirrorClient(conn), copts...) }, ) + if err != nil { + return g.client.GRPCClient().RoundRobin(fctx, func(ictx context.Context, conn *grpc.ClientConn, copts ...grpc.CallOption) (any, error) { + if strings.EqualFold(conn.Target(), target) { + return nil, errors.ErrTargetNotFound + } + return f(ictx, conn.Target(), NewMirrorClient(conn), copts...) + }) + } + return res, nil } // DoMulti performs a gRPC operation on multiple targets using the provided function. @@ -168,7 +178,7 @@ func (g *gateway) DoMulti( if len(targets) == 0 { return errors.ErrTargetNotFound } - return g.client.GRPCClient().OrderedRangeConcurrent(g.ForwardedContext(ctx, g.podName), targets, -1, + return g.client.GRPCClient().OrderedRangeConcurrent(g.forwardedContext(ctx, g.podName), targets, -1, func(ictx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) (err error) { select { case <-ictx.Done(): diff --git a/pkg/gateway/mirror/service/gateway_test.go b/pkg/gateway/mirror/service/gateway_test.go index dd4cb6ecc4..793e76ddaf 100644 --- a/pkg/gateway/mirror/service/gateway_test.go +++ b/pkg/gateway/mirror/service/gateway_test.go @@ -200,7 +200,7 @@ package service // } // } // -// func Test_gateway_ForwardedContext(t *testing.T) { +// func Test_gateway_forwardedContext(t *testing.T) { // type args struct { // ctx context.Context // podName string @@ -301,7 +301,7 @@ package service // podName: test.fields.podName, // } // -// got := g.ForwardedContext(test.args.ctx, test.args.podName) +// got := g.forwardedContext(test.args.ctx, test.args.podName) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } diff --git a/pkg/gateway/mirror/service/mirror.go b/pkg/gateway/mirror/service/mirror.go index 01e6e59e20..dbd76572d4 100644 --- a/pkg/gateway/mirror/service/mirror.go +++ b/pkg/gateway/mirror/service/mirror.go @@ -26,7 +26,6 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/codes" - "github.com/vdaas/vald/internal/net/grpc/errdetails" "github.com/vdaas/vald/internal/net/grpc/status" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/sync" @@ -64,10 +63,10 @@ func NewMirrorClient(conn *grpc.ClientConn) MirrorClient { } type mirr struct { - addrl sync.Map[string, any] // List of all connected addresses + addrs sync.Map[string, any] // List of all connected addresses selfMirrTgts []*payload.Mirror_Target // Targets of self mirror 
gateway - selfMirrAddrl sync.Map[string, any] // List of self Mirror gateway addresses - gwAddrl sync.Map[string, any] // List of Vald gateway (LB gateway) addresses + selfMirrAddrs sync.Map[string, any] // List of self Mirror gateway addresses + gwAddrs sync.Map[string, any] // List of Vald gateway (LB gateway) addresses eg errgroup.Group registerDur time.Duration gateway Gateway @@ -90,7 +89,7 @@ func NewMirror(opts ...MirrorOption) (_ Mirror, err error) { } m.selfMirrTgts = make([]*payload.Mirror_Target, 0) - m.selfMirrAddrl.Range(func(addr string, _ any) bool { + m.selfMirrAddrs.Range(func(addr string, _ any) bool { var ( host string port uint16 @@ -178,12 +177,6 @@ func (m *mirr) registers( } }() - reqInfo := &errdetails.RequestInfo{ - ServingData: errdetails.Serialize(tgts), - } - resInfo := &errdetails.ResourceInfo{ - ResourceType: errdetails.ValdGRPCResourceTypePrefix + "/vald.v1." + mirror.RegisterRPCName, - } resTgts := make([]*payload.Mirror_Target, 0, len(tgts.GetTargets())) exists := make(map[string]bool) var result sync.Map[string, error] // map[target host: error] @@ -203,37 +196,36 @@ func (m *mirr) registers( switch { case errors.Is(err, context.Canceled): err = status.WrapWithCanceled( - mirror.RegisterRPCName+" API canceld", err, reqInfo, resInfo, + mirror.RegisterRPCName+" API canceled", err, ) attrs = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): err = status.WrapWithCanceled( - mirror.RegisterRPCName+" API deadline exceeded", err, reqInfo, resInfo, + mirror.RegisterRPCName+" API deadline exceeded", err, ) attrs = trace.StatusCodeDeadlineExceeded(err.Error()) case errors.Is(err, errors.ErrGRPCClientConnNotFound("*")): err = status.WrapWithInternal( - mirror.RegisterRPCName+" API connection not found", err, reqInfo, resInfo, + mirror.RegisterRPCName+" API connection not found", err, ) attrs = trace.StatusCodeInternal(err.Error()) case errors.Is(err, errors.ErrTargetNotFound): err = status.WrapWithInvalidArgument( - mirror.RegisterRPCName+" API target not found", err, reqInfo, resInfo, + mirror.RegisterRPCName+" API target not found", err, ) attrs = trace.StatusCodeInvalidArgument(err.Error()) default: - var ( - st *status.Status - msg string - ) - st, msg, err = status.ParseError(err, codes.Internal, - "failed to parse "+mirror.RegisterRPCName+" gRPC error response", reqInfo, resInfo, - ) - attrs = trace.FromGRPCStatus(st.Code(), msg) + st, _ := status.FromError(err) + if st == nil || st.Message() == "" { + // This condition is implemented just in case to prevent nil pointer errors when retrieving st.Code() and st.Message(), although it is unlikely to match this condition. + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + st = status.New(codes.Unknown, "failed to parse "+mirror.RegisterRPCName+" gRPC error response") + } + attrs = trace.FromGRPCStatus(st.Code(), st.Message()) // When the ingress resource is deleted, the controller's default backend results(Unimplemented error) are returned so that the connection should be disconnected. // If it is a different namespace on the same cluster, the connection is automatically disconnected because the net.grpc health check fails. 
- if st != nil && st.Code() == codes.Unimplemented { + if st.Code() == codes.Unimplemented { host, port, err := net.SplitHostPort(target) if err != nil { log.Warn(err) @@ -247,7 +239,7 @@ func (m *mirr) registers( } } } - log.Error("failed to send Register API to %s\t: %v", target, err) + log.Errorf("failed to send Register API to %s\t: %v", target, err) if span != nil { span.RecordError(err) span.SetAttributes(attrs...) @@ -278,7 +270,7 @@ func (m *mirr) registers( if err != nil { if errors.Is(err, errors.ErrGRPCClientConnNotFound("*")) { err = status.WrapWithInternal( - mirror.RegisterRPCName+" API connection not found", err, reqInfo, resInfo, + mirror.RegisterRPCName+" API connection not found", err, ) log.Warn(err) if span != nil { @@ -288,11 +280,11 @@ func (m *mirr) registers( } return nil, err } + log.Error(err) st, msg, err := status.ParseError(err, codes.Internal, - "failed to parse "+mirror.RegisterRPCName+" gRPC error response", reqInfo, resInfo, + "failed to parse "+mirror.RegisterRPCName+" gRPC error response", ) - log.Warn(err) if span != nil { span.RecordError(err) span.SetAttributes(trace.FromGRPCStatus(st.Code(), msg)...) @@ -317,15 +309,16 @@ func (m *mirr) Connect(ctx context.Context, targets ...*payload.Mirror_Target) e for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) // addr: host:port if !m.isSelfMirrorAddr(addr) && !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if !ok || !m.IsConnected(ctx, addr) { + m.gateway.GRPCClient().SetDisableResolveDNSAddr(addr, true) _, err := m.gateway.GRPCClient().Connect(ctx, addr) if err != nil { - m.addrl.Delete(addr) + m.addrs.Delete(addr) return err } } - m.addrl.Store(addr, struct{}{}) + m.addrs.Store(addr, struct{}{}) } } return nil @@ -345,13 +338,13 @@ func (m *mirr) Disconnect(ctx context.Context, targets ...*payload.Mirror_Target for _, target := range targets { addr := net.JoinHostPort(target.GetHost(), uint16(target.GetPort())) if !m.isGatewayAddr(addr) { - _, ok := m.addrl.Load(addr) + _, ok := m.addrs.Load(addr) if ok || m.IsConnected(ctx, addr) { if err := m.gateway.GRPCClient().Disconnect(ctx, addr); err != nil && !errors.Is(err, errors.ErrGRPCClientConnNotFound(addr)) { return err } - m.addrl.Delete(addr) + m.addrs.Delete(addr) } } } @@ -366,7 +359,7 @@ func (m *mirr) IsConnected(ctx context.Context, addr string) bool { // MirrorTargets returns the Mirror targets, including the address of this gateway and the addresses of other Mirror gateways // to which this gateway is currently connected. func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target, err error) { - tgts = make([]*payload.Mirror_Target, 0, m.addrl.Len()) + tgts = make([]*payload.Mirror_Target, 0, m.addrs.Len()) m.RangeMirrorAddr(func(addr string, _ any) bool { if m.IsConnected(ctx, addr) { var ( @@ -391,12 +384,12 @@ func (m *mirr) MirrorTargets(ctx context.Context) (tgts []*payload.Mirror_Target } func (m *mirr) isSelfMirrorAddr(addr string) bool { - _, ok := m.selfMirrAddrl.Load(addr) + _, ok := m.selfMirrAddrs.Load(addr) return ok } func (m *mirr) isGatewayAddr(addr string) bool { - _, ok := m.gwAddrl.Load(addr) + _, ok := m.gwAddrs.Load(addr) return ok } @@ -413,7 +406,7 @@ func (m *mirr) connectedOtherMirrorAddrs(ctx context.Context) (addrs []string) { // RangeMirrorAddr calls f sequentially for each key and value present in the connection map. If f returns false, range stops the iteration. 
func (m *mirr) RangeMirrorAddr(f func(addr string, _ any) bool) { - m.addrl.Range(func(addr string, value any) bool { + m.addrs.Range(func(addr string, value any) bool { if !m.isGatewayAddr(addr) && !m.isSelfMirrorAddr(addr) { if !f(addr, value) { return false diff --git a/pkg/gateway/mirror/service/mirror_option.go b/pkg/gateway/mirror/service/mirror_option.go index 3523360b1e..66b0fae912 100644 --- a/pkg/gateway/mirror/service/mirror_option.go +++ b/pkg/gateway/mirror/service/mirror_option.go @@ -44,7 +44,7 @@ func WithGatewayAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("lbAddrs", addrs) } for _, addr := range addrs { - m.gwAddrl.Store(addr, struct{}{}) + m.gwAddrs.Store(addr, struct{}{}) } return nil } @@ -57,7 +57,7 @@ func WithSelfMirrorAddrs(addrs ...string) MirrorOption { return errors.NewErrCriticalOption("selfMirrorAddrs", addrs) } for _, addr := range addrs { - m.selfMirrAddrl.Store(addr, struct{}{}) + m.selfMirrAddrs.Store(addr, struct{}{}) } return nil } diff --git a/pkg/gateway/mirror/service/mirror_test.go b/pkg/gateway/mirror/service/mirror_test.go index d346615ffe..d12d140212 100644 --- a/pkg/gateway/mirror/service/mirror_test.go +++ b/pkg/gateway/mirror/service/mirror_test.go @@ -87,6 +87,7 @@ func Test_mirr_Connect(t *testing.T) { ConnectFunc: func(_ context.Context, _ string, _ ...grpc.DialOption) (conn pool.Conn, err error) { return conn, err }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -118,6 +119,7 @@ func Test_mirr_Connect(t *testing.T) { ConnectFunc: func(_ context.Context, _ string, _ ...grpc.DialOption) (pool.Conn, error) { return nil, errors.New("missing port in address") }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -221,6 +223,7 @@ func Test_mirr_Disconnect(t *testing.T) { DisconnectFunc: func(_ context.Context, _ string) error { return nil }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -252,6 +255,7 @@ func Test_mirr_Disconnect(t *testing.T) { DisconnectFunc: func(_ context.Context, _ string) error { return errors.New("missing port in address") }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -373,6 +377,7 @@ func Test_mirr_MirrorTargets(t *testing.T) { IsConnectedFunc: func(_ context.Context, addr string) bool { return connected[addr] }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -498,6 +503,7 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { IsConnectedFunc: func(_ context.Context, addr string) bool { return connected[addr] }, + SetDisableResolveDNSAddrFunc: func(addr string, disabled bool) {}, } }, }, @@ -734,10 +740,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx context.Context // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -769,10 +775,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -797,10 +803,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // ctx:nil, // }, // fields: 
fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -834,10 +840,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -857,10 +863,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts *payload.Mirror_Targets // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -897,10 +903,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -926,10 +932,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // tgts:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -963,10 +969,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -986,10 +992,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1022,10 +1028,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1051,10 +1057,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1088,10 +1094,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// 
gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1110,10 +1116,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1145,10 +1151,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1173,10 +1179,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1210,10 +1216,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1232,10 +1238,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr string // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1267,10 +1273,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1295,10 +1301,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // addr:"", // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1332,10 +1338,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, @@ -1354,10 +1360,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f func(addr string, _ any) bool // } // type fields struct { -// addrl sync.Map[string, any] +// addrs sync.Map[string, any] // selfMirrTgts []*payload.Mirror_Target -// selfMirrAddrl 
sync.Map[string, any] -// gwAddrl sync.Map[string, any] +// selfMirrAddrs sync.Map[string, any] +// gwAddrs sync.Map[string, any] // eg errgroup.Group // registerDur time.Duration // gateway Gateway @@ -1384,10 +1390,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1412,10 +1418,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // f:nil, // }, // fields: fields { -// addrl:nil, +// addrs:nil, // selfMirrTgts:nil, -// selfMirrAddrl:nil, -// gwAddrl:nil, +// selfMirrAddrs:nil, +// gwAddrs:nil, // eg:nil, // registerDur:nil, // gateway:nil, @@ -1449,10 +1455,10 @@ func Test_mirr_connectedOtherMirrorAddrs(t *testing.T) { // checkFunc = defaultCheckFunc // } // m := &mirr{ -// addrl: test.fields.addrl, +// addrs: test.fields.addrs, // selfMirrTgts: test.fields.selfMirrTgts, -// selfMirrAddrl: test.fields.selfMirrAddrl, -// gwAddrl: test.fields.gwAddrl, +// selfMirrAddrs: test.fields.selfMirrAddrs, +// gwAddrs: test.fields.gwAddrs, // eg: test.fields.eg, // registerDur: test.fields.registerDur, // gateway: test.fields.gateway, diff --git a/pkg/gateway/mirror/usecase/vald.go b/pkg/gateway/mirror/usecase/vald.go index 1a8939b665..4bf9744572 100644 --- a/pkg/gateway/mirror/usecase/vald.go +++ b/pkg/gateway/mirror/usecase/vald.go @@ -23,9 +23,9 @@ import ( "github.com/vdaas/vald/internal/net" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/observability" - bometrics "github.com/vdaas/vald/internal/observability/metrics/backoff" + backoffmetrics "github.com/vdaas/vald/internal/observability/metrics/backoff" cbmetrics "github.com/vdaas/vald/internal/observability/metrics/circuitbreaker" - mirrmetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" + mirrormetrics "github.com/vdaas/vald/internal/observability/metrics/gateway/mirror" "github.com/vdaas/vald/internal/runner" "github.com/vdaas/vald/internal/safety" "github.com/vdaas/vald/internal/servers/server" @@ -135,9 +135,9 @@ func New(cfg *config.Data) (r runner.Runner, err error) { if cfg.Observability.Enabled { obs, err = observability.NewWithConfig( cfg.Observability, - bometrics.New(), + backoffmetrics.New(), cbmetrics.New(), - mirrmetrics.New(m), + mirrormetrics.New(m), ) if err != nil { return nil, err @@ -263,7 +263,7 @@ func (r *run) Stop(ctx context.Context) (errs error) { return errs } -// PtopStop is a method called after execution of Stop. +// PostStop is a method called after execution of Stop. 
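// NOTE (editor): the corrector.go hunks below switch the agent ordering from
// ascending to descending by stored index count. A self-contained sketch of
// that sort follows; indexCount is a stand-in for payload.Info_Index_Count.
package main

import (
	"cmp"
	"fmt"
	"slices"
)

type indexCount struct{ Stored uint32 }

// sortAgentsByStored returns the agent addresses ordered so that agents
// holding the most stored indices are processed first.
func sortAgentsByStored(counts map[string]indexCount) []string {
	agents := make([]string, 0, len(counts))
	for agent := range counts {
		agents = append(agents, agent)
	}
	// Comparing right against left yields a descending order.
	slices.SortFunc(agents, func(left, right string) int {
		return cmp.Compare(counts[right].Stored, counts[left].Stored)
	})
	return agents
}

func main() {
	counts := map[string]indexCount{
		"agent-0": {Stored: 10},
		"agent-1": {Stored: 30},
		"agent-2": {Stored: 20},
	}
	fmt.Println(sortAgentsByStored(counts)) // [agent-1 agent-2 agent-0]
}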
func (*run) PostStop(_ context.Context) error { return nil } diff --git a/pkg/index/job/correction/service/corrector.go b/pkg/index/job/correction/service/corrector.go index aab4b3947f..6b4a0a9c64 100644 --- a/pkg/index/job/correction/service/corrector.go +++ b/pkg/index/job/correction/service/corrector.go @@ -17,7 +17,6 @@ import ( "cmp" "context" "fmt" - "io" "os" "reflect" "slices" @@ -31,6 +30,7 @@ import ( "github.com/vdaas/vald/internal/db/kvs/pogreb" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" + "github.com/vdaas/vald/internal/io" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" "github.com/vdaas/vald/internal/net/grpc/codes" @@ -87,7 +87,7 @@ func New(opts ...Option) (_ Corrector, err error) { log.Errorf("failed to create dir %s", dir) return nil, err } - path := file.Join(dir, "checkedid.db") + path := file.Join(dir, "checked_id.db") db, err := pogreb.New(pogreb.WithPath(path), pogreb.WithBackgroundCompactionInterval(c.backgroundCompactionInterval), pogreb.WithBackgroundSyncInterval(c.backgroundSyncInterval)) @@ -136,15 +136,22 @@ func (c *correct) Start(ctx context.Context) (err error) { return err } counts := detail.GetCounts() - agents := make([]string, 0, detail.GetLiveAgents()) - for agent, count := range counts { - log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving()) + agents := make([]string, 0, len(counts)) + for agent := range counts { agents = append(agents, agent) } slices.SortFunc(agents, func(left, right string) int { - return cmp.Compare(counts[left].GetStored(), counts[right].GetStored()) + return cmp.Compare(counts[right].GetStored(), counts[left].GetStored()) }) + for _, agent := range agents { + count, ok := counts[agent] + if ok && count != nil { + log.Infof("index info: addr(%s), stored(%d), uncommitted(%d), indexing=%t, saving=%t", agent, count.GetStored(), count.GetUncommitted(), count.GetIndexing(), count.GetSaving()) + } + } + log.Infof("sorted agents: %v,\tdiscovered agents: %v", agents, c.discoverer.GetAddrs(ctx)) + errs := make([]error, 0, len(agents)) emptyReq := new(payload.Object_List_Request) @@ -177,25 +184,27 @@ func (c *correct) Start(ctx context.Context) (err error) { uncommitted uint32 indexing bool saving bool + debugMsg string ) count, ok := counts[addr] if ok && count != nil { stored = count.GetStored() uncommitted = count.GetUncommitted() + indexing = count.GetIndexing() + saving = count.GetSaving() + debugMsg = fmt.Sprintf("agent %s (total index detail = stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) if stored+uncommitted == 0 { // id no indices in agent skip process + log.Warnf("skipping index correction process due to zero index detected for %s", debugMsg) return nil } - indexing = count.GetIndexing() - saving = count.GetSaving() } - debugMsg := fmt.Sprintf("agent %s (stored: %d, uncommitted: %d, indexing=%t, saving=%t), stream concurrency: %d, processing %d/%d, replicas: size(%d) = addrs%v", addr, stored, uncommitted, indexing, saving, c.streamListConcurrency, corrected, len(agents), len(replicas), replicas) eg, egctx := errgroup.WithContext(ctx) eg.SetLimit(c.streamListConcurrency) ctx, cancel := context.WithCancelCause(egctx) - stream, err := 
vald.NewObjectClient(conn).StreamListObject(ctx, emptyReq, copts...) - if err != nil { + stream, err := vc.NewValdClient(conn).StreamListObject(ctx, emptyReq, copts...) + if err != nil || stream == nil { return err } log.Infof("starting correction for %s", debugMsg) @@ -207,15 +216,15 @@ func (c *correct) Start(ctx context.Context) (err error) { if !errors.Is(ctx.Err(), context.Canceled) { log.Errorf("context done unexpectedly: %v for %s", ctx.Err(), debugMsg) } - if context.Cause(ctx) != io.EOF { + if !errors.Is(context.Cause(ctx), io.EOF) { log.Errorf("context canceled due to %v for %s", ctx.Err(), debugMsg) } err = eg.Wait() if err != nil { - log.Errorf("errgroup returned error: %v for %s", ctx.Err(), debugMsg) - return err + log.Errorf("correction returned error status errgroup returned error: %v for %s", ctx.Err(), debugMsg) + } else { + log.Infof("correction finished for %s", debugMsg) } - log.Infof("correction finished for %s", debugMsg) return nil default: res, err := stream.Recv() @@ -225,27 +234,12 @@ func (c *correct) Start(ctx context.Context) (err error) { } else { cancel(errors.ErrStreamListObjectStreamFinishedUnexpectedly(err)) } - } else { + } else if res != nil && res.GetVector() != nil && res.GetVector().GetId() != "" && res.GetVector().GetTimestamp() < start.UnixNano() { eg.Go(safety.RecoverFunc(func() (err error) { vec := res.GetVector() - if vec == nil || vec.GetId() == "" { - st := res.GetStatus() - if st != nil { - log.Errorf("invalid vector id: %s detected and returned status code: %d, message: %s, details: %v, debug: %s", vec.GetId(), st.GetCode(), st.GetMessage(), st.GetDetails(), debugMsg) - } - return errors.ErrFailedToReceiveVectorFromStream - } - - // skip if the vector is inserted after correction start - if vec.GetTimestamp() > start.UnixNano() { - log.Debugf("index correction process for ID: %s skipped due to newer timestamp detected. job started at %s but object timestamp is %s", - vec.GetId(), - start.Format(time.RFC3339Nano), - time.Unix(0, vec.GetTimestamp()).Format(time.RFC3339Nano)) - return nil - } - + ts := vec.GetTimestamp() id := vec.GetId() + _, ok, err := c.checkedList.Get(id) if err != nil { log.Errorf("failed to perform Get from check list but still try to finish processing without cache: %v", err) @@ -264,418 +258,35 @@ func (c *correct) Start(ctx context.Context) (err error) { // Therefore, the process is only to correct the missing replicas. if len(replicas) <= 0 { diff := c.indexReplica - 1 - addrs := c.discoverer.GetAddrs(egctx) // correct index replica shortage if diff > 0 { - log.Infof("replica shortage(diff=%d) of vector id: %s detected from last %s. inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: vec.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) 
- if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < vec.GetTimestamp() { - _, err := client.Update(ctx, &payload.Update_Request{ - Vector: vec, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: vec.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } + return c.correctShortage(egctx, id, addr, debugMsg, vec, make(map[string]*payload.Object_Timestamp), diff) } return nil } - var ( - latest int64 - mu sync.Mutex - found = make(map[string]*payload.Object_Timestamp, len(addr)) - latestAgent = addr - ) // load index replica from other agents and store it to found map - if err := c.discoverer.GetClient().OrderedRangeConcurrent(egctx, replicas, len(replicas), - func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { - ots, err := vald.NewObjectClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) - return err - } else if st.Code() == codes.NotFound { - // when replica of agent > index replica, this happens - return nil - } else if st.Code() == codes.Canceled { - return nil - } else { - log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) - return err - } - } - - // skip if the vector is inserted after correction start - if ots.GetTimestamp() > start.UnixNano() { - log.Debugf("timestamp of vector(id: %s, timestamp: %v) is newer than correction start time(%v). 
skipping...", - ots.GetId(), - ots.GetTimestamp(), - start.UnixNano(), - ) - return nil - } - mu.Lock() - found[addr] = ots - if latest < ots.GetTimestamp() { - latest = ots.GetTimestamp() - if latest > vec.GetTimestamp() { - latestAgent = addr - } - } - mu.Unlock() - return nil - }, - ); err != nil { + found, skipped, latest, latestAgent, err := c.loadReplicaInfo(egctx, addr, id, replicas, counts, ts, start) + if err != nil { return err } - latestObject := vec - - // current object timestamp is not latest get latest object from other agent index replica - if vec.GetTimestamp() < latest && latestAgent != addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - obj, err := vald.NewObjectClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { - latestObject = obj + if len(found) != 0 && ((len(replicas) > 0 && len(skipped) == 0) || (len(skipped) > 0 && len(skipped) < len(replicas))) { + // current object timestamp is not latest get latest object from other agent index replica + if ts < latest && latestAgent != addr { + latestObject := c.getLatestObject(egctx, id, addr, latestAgent, latest) + if latestObject != nil && latestObject.GetVector() != nil && latestObject.GetId() != "" && latestObject.GetTimestamp() >= latest { + vec = latestObject } - return obj, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %w", id, addr, latest, err)) } + c.correctTimestamp(ctx, id, vec, found) + } else if len(skipped) > 0 { + log.Debugf("timestamp correction for index id %s skipped, replica %s, skipped agents: %v", id, addr, skipped) } - if latestObject.Timestamp < latest { - latestObject.Timestamp = latest - } - tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string - for addr, ots := range found { // correct timestamp inconsistency - if latestObject.GetTimestamp() > ots.GetTimestamp() { - log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", - ots.GetId(), - time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), - latestObject.GetId(), - tss, - ) - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - // TODO: use UpdateTimestamp when it's implemented because here we just want to update only the timestamp but not the vector - _, err := client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. 
This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - _, err = client.Insert(ctx, &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp(), - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - } - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - log.Infof("vector successfully updated. address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) - c.correctedOldIndexCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err)) - } - } - } - currentNumberOfIndexReplica := len(found) + 1 - diff := c.indexReplica - currentNumberOfIndexReplica - addrs := c.discoverer.GetAddrs(egctx) + diff := c.indexReplica - (len(found) + 1) if diff > 0 { // correct index replica shortage - log.Infof("replica shortage(diff=%d) of vector id: %s detected for %s. 
inserting to other agents = %v", diff, id, debugMsg, addrs) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToInsert - } - req := &payload.Insert_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Insert_Config{ - Timestamp: latestObject.GetTimestamp(), - }, - } - for _, daddr := range addrs { - if diff > 0 && daddr != addr { - _, ok := found[daddr] - if !ok { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - client := vald.NewValdClient(conn) - _, err := client.Insert(ctx, req, copts...) - if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.AlreadyExists { - obj, err := client.GetObject(ctx, &payload.Object_VectorRequest{ - Id: &payload.Object_ID{ - Id: id, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - if obj.GetTimestamp() < latestObject.GetTimestamp() { - _, err = client.Update(ctx, &payload.Update_Request{ - Vector: latestObject, - // TODO: this should be deleted after Config.Timestamp deprecation - Config: &payload.Update_Config{ - // TODO: Decrementing because it's gonna be incremented befor being pushed - // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation - // so we should consider refactoring vqueue. - Timestamp: latestObject.GetTimestamp() - 1, - }, - }, copts...) - if err != nil { - if st, ok = status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - c.correctedOldIndexCount.Add(1) - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff-- - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to insert object to agent(%s): %w", daddr, err)) - } - } - } - } + return c.correctShortage(egctx, id, addr, debugMsg, vec, found, diff) } else if diff < 0 { // correct index replica oversupply - log.Infof("replica oversupply of vector %s. deleting...", id) - if len(addrs) == 0 { - return errors.ErrNoAvailableAgentToRemove - } - req := &payload.Remove_Request{ - Id: &payload.Object_ID{ - Id: id, - }, - } - for _, daddr := range addrs { - if diff < 0 { - _, ok := found[daddr] - if ok || daddr == addr { - _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(egctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, - conn *grpc.ClientConn, - copts ...grpc.CallOption, - ) (any, error) { - _, err := vald.NewRemoveClient(conn).Remove(ctx, req, copts...) 
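// NOTE (editor): a minimal sketch of the replica-diff decision introduced
// above: a positive diff triggers the shortage path (insert into more agents)
// and a negative diff triggers the oversupply path (remove from extra agents).
// correctReplica is an illustrative stand-in for the correctShortage and
// correctOversupply helpers, with simplified arguments.
package main

import "fmt"

func correctReplica(indexReplica, foundReplicas int) string {
	// foundReplicas counts the other agents holding the vector; +1 accounts
	// for the agent currently being scanned.
	diff := indexReplica - (foundReplicas + 1)
	switch {
	case diff > 0:
		return fmt.Sprintf("shortage: insert into %d more agent(s)", diff)
	case diff < 0:
		return fmt.Sprintf("oversupply: remove from %d agent(s)", -diff)
	default:
		return "replica count is as configured"
	}
}

func main() {
	fmt.Println(correctReplica(3, 1)) // shortage: insert into 1 more agent(s)
	fmt.Println(correctReplica(3, 3)) // oversupply: remove from 1 agent(s)
}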
- if err != nil { - if st, ok := status.FromError(err); !ok { - log.Errorf("gRPC call returned not a gRPC status error: %v", err) - return nil, err - } else if st.Code() == codes.NotFound { - diff++ - c.correctedReplicationCount.Add(1) - return nil, nil - } else if st.Code() == codes.Canceled { - return nil, nil - } - return nil, err - } - diff++ - c.correctedReplicationCount.Add(1) - return nil, nil - }) - if err != nil { - log.Error(fmt.Errorf("failed to delete object from agent(%s): %w", daddr, err)) - } - } - } - } + return c.correctOversupply(egctx, id, addr, debugMsg, found, diff) } return nil })) @@ -710,3 +321,320 @@ func (c *correct) NumberOfCorrectedOldIndex() uint64 { func (c *correct) NumberOfCorrectedReplication() uint64 { return c.correctedReplicationCount.Load() } + +func (c *correct) loadReplicaInfo( + ctx context.Context, + originAddr, id string, + replicas []string, + counts map[string]*payload.Info_Index_Count, + ts int64, + start time.Time, +) ( + found map[string]*payload.Object_Timestamp, + skipped []string, + latest int64, + latestAgent string, + err error, +) { + var mu sync.Mutex + latestAgent = originAddr + skipped = make([]string, 0, len(replicas)) + found = make(map[string]*payload.Object_Timestamp, c.indexReplica-1) + tss := time.Unix(0, start.UnixNano()).Format(time.RFC3339Nano) + err = c.discoverer.GetClient().OrderedRangeConcurrent(ctx, replicas, len(replicas), + func(ctx context.Context, addr string, conn *grpc.ClientConn, copts ...grpc.CallOption) error { + if originAddr == addr { + return nil + } + count, ok := counts[addr] // counts is read-only we don't need to lock. + if ok && count != nil && count.GetStored() == 0 && count.GetUncommitted() == 0 { + mu.Lock() + skipped = append(skipped, addr) + mu.Unlock() + return nil + } + + ots, err := vc.NewValdClient(conn).GetTimestamp(ctx, &payload.Object_TimestampRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call GetTimestamp to agent: %s, id: %s returned not a gRPC status error: %v", addr, id, err) + return err + } else if st.Code() == codes.NotFound { + // when replica of agent > index replica, this happens + return nil + } else if st.Code() == codes.Canceled { + return nil + } else { + log.Errorf("failed to GetTimestamp with unexpected error. agent: %s, id: %s, code: %v, message: %s", addr, id, st.Code(), st.Message()) + return err + } + } + + if ots == nil { + // not found + return nil + } + + // skip if the vector is inserted after correction start + if ots.GetTimestamp() > start.UnixNano() { + log.Debugf("timestamp of vector(id: %s, timestamp: %s) is newer than correction start time(%s). 
skipping...", + ots.GetId(), + time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), + tss, + ) + return nil + } + mu.Lock() + found[addr] = ots + if latest < ots.GetTimestamp() { + latest = ots.GetTimestamp() + if latest > ts { + latestAgent = addr + } + } + mu.Unlock() + return nil + }, + ) + return +} + +func (c *correct) getLatestObject( + ctx context.Context, id, addr, latestAgent string, latest int64, +) (latestObject *payload.Object_Vector) { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.ObjectRPCServiceName+"/"+vald.GetObjectRPCName), latestAgent, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + obj, err := vc.NewValdClient(conn).GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + if obj == nil { + // not found + return nil, nil + } + if obj.GetTimestamp() >= latest && obj.GetId() != "" && obj.GetVector() != nil { + latestObject = obj + } + return obj, nil + }) + if err != nil { + log.Errorf("failed to load latest object id: %s, agent: %s, timestamp: %d, error: %v", id, addr, latest, err) + } + if latestObject != nil && latestObject.GetTimestamp() < latest { + latestObject.Timestamp = latest + } + return latestObject +} + +func (c *correct) correctTimestamp( + ctx context.Context, + id string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, +) { + tss := time.Unix(0, latestObject.GetTimestamp()).Format(time.RFC3339Nano) // timestamp string + for addr, ots := range found { // correct timestamp inconsistency + if latestObject.GetTimestamp() > ots.GetTimestamp() { + log.Infof("timestamp inconsistency detected with vector(id: %s, timestamp: %s). updating with the latest vector(id: %s, timestamp: %s)", + ots.GetId(), + time.Unix(0, ots.GetTimestamp()).Format(time.RFC3339Nano), + latestObject.GetId(), + tss, + ) + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.UpdateRPCServiceName+"/"+vald.UpdateRPCName), addr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + _, err := client.UpdateTimestamp(ctx, &payload.Update_TimestampRequest{ + Id: latestObject.GetId(), + Timestamp: latestObject.GetTimestamp(), + }, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.Canceled || + st.Code() == codes.AlreadyExists || + st.Code() == codes.InvalidArgument || + st.Code() == codes.NotFound { + return nil, nil + } + return nil, err + } + log.Infof("vector successfully updated. 
address: %s, uuid: %s, timestamp: %s", addr, latestObject.GetId(), tss) + c.correctedOldIndexCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to fix timestamp to %s for id %s agent %s error: %w", tss, id, addr, err) + } + } + } +} + +func (c *correct) correctOversupply( + ctx context.Context, + id, selfAddr, debugMsg string, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica oversupply(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. deleting from agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, found) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToRemove + } + req := &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: id, + }, + } + for _, daddr := range addrs { + if diff < 0 { + _, ok := found[daddr] + if ok || daddr == selfAddr { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.RemoveRPCServiceName+"/"+vald.RemoveRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + _, err := vc.NewValdClient(conn).Remove(ctx, req, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + diff++ + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to delete object from agent(%s): %w", daddr, err) + } + } + } + } + return nil +} + +func (c *correct) correctShortage( + ctx context.Context, + id, selfAddr, debugMsg string, + latestObject *payload.Object_Vector, + found map[string]*payload.Object_Timestamp, + diff int, +) (err error) { + addrs := c.discoverer.GetAddrs(ctx) + log.Infof("replica shortage(configured: %d, stored: %d, diff: %d) of vector id: %s detected for %s. inserting to other agents = %v", c.indexReplica, len(found)+1, diff, id, debugMsg, addrs) + if len(addrs) == 0 { + return errors.ErrNoAvailableAgentToInsert + } + req := &payload.Insert_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Insert_Config{ + Timestamp: latestObject.GetTimestamp(), + }, + } + for _, daddr := range addrs { + if diff > 0 && daddr != selfAddr { + _, ok := found[daddr] + if !ok { + _, err := c.discoverer.GetClient().Do(grpc.WithGRPCMethod(ctx, vald.PackageName+"."+vald.InsertRPCServiceName+"/"+vald.InsertRPCName), daddr, func(ctx context.Context, + conn *grpc.ClientConn, + copts ...grpc.CallOption, + ) (any, error) { + client := vc.NewValdClient(conn) + _, err := client.Insert(ctx, req, copts...) + if err != nil { + if st, ok := status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.AlreadyExists { + var obj *payload.Object_Vector + obj, err = client.GetObject(ctx, &payload.Object_VectorRequest{ + Id: &payload.Object_ID{ + Id: id, + }, + }, copts...) 
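// NOTE (editor): the correctTimestamp helper above now reconciles replicas by
// calling UpdateTimestamp with the newest observed timestamp instead of
// re-sending the whole vector. A minimal sketch of that reconciliation loop
// follows, with updateTimestamp standing in for the RPC call (the payload
// types and client are not reproduced here):
package main

import "fmt"

type objectTimestamp struct {
	ID        string
	Timestamp int64
}

// reconcileTimestamps calls update for every replica whose stored timestamp is
// older than the latest one observed across agents.
func reconcileTimestamps(latest objectTimestamp, found map[string]objectTimestamp,
	update func(addr, id string, ts int64) error,
) {
	for addr, ots := range found {
		if latest.Timestamp > ots.Timestamp {
			if err := update(addr, latest.ID, latest.Timestamp); err != nil {
				fmt.Printf("failed to fix timestamp for id %s on agent %s: %v\n", latest.ID, addr, err)
			}
		}
	}
}

func main() {
	latest := objectTimestamp{ID: "uuid-1", Timestamp: 200}
	found := map[string]objectTimestamp{
		"agent-0": {ID: "uuid-1", Timestamp: 100},
		"agent-1": {ID: "uuid-1", Timestamp: 200},
	}
	reconcileTimestamps(latest, found, func(addr, id string, ts int64) error {
		fmt.Printf("UpdateTimestamp(%s, %s, %d)\n", addr, id, ts)
		return nil
	})
}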
+ if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + if obj != nil { + if obj.GetTimestamp() < latestObject.GetTimestamp() { + _, err = client.Update(ctx, &payload.Update_Request{ + Vector: latestObject, + // TODO: this should be deleted after Config.Timestamp deprecation + Config: &payload.Update_Config{ + // TODO: Decrementing because it's gonna be incremented before being pushed + // to vqueue in the agent. This is a not ideal workaround for the current vqueue implementation + // so we should consider refactoring vqueue. + Timestamp: latestObject.GetTimestamp() - 1, + }, + }, copts...) + if err != nil { + if st, ok = status.FromError(err); !ok || st == nil { + log.Errorf("gRPC call returned not a gRPC status error: %v", err) + return nil, err + } else if st.Code() == codes.NotFound { + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + } + diff-- + c.correctedReplicationCount.Add(1) + } + return nil, nil + } else if st.Code() == codes.Canceled { + return nil, nil + } + return nil, err + } + diff-- + c.correctedReplicationCount.Add(1) + return nil, nil + }) + if err != nil { + log.Errorf("failed to insert object to agent(%s): %w", daddr, err) + } + } + } + } + return nil +} diff --git a/pkg/index/job/correction/service/corrector_test.go b/pkg/index/job/correction/service/corrector_test.go index 0981d9e6f6..996c4dde97 100644 --- a/pkg/index/job/correction/service/corrector_test.go +++ b/pkg/index/job/correction/service/corrector_test.go @@ -905,3 +905,784 @@ package service // }) // } // } +// +// func Test_correct_loadReplicaInfo(t *testing.T) { +// type args struct { +// ctx context.Context +// originAddr string +// id string +// replicas []string +// counts map[string]*payload.Info_Index_Count +// ts int64 +// start time.Time +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// wantFound map[string]*payload.Object_Timestamp +// wantSkipped []string +// wantLatest int64 +// wantLatestAgent string +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, map[string]*payload.Object_Timestamp, []string, int64, string, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotFound map[string]*payload.Object_Timestamp, gotSkipped []string, gotLatest int64, gotLatestAgent string, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotFound, w.wantFound) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotFound, w.wantFound) +// } +// if !reflect.DeepEqual(gotSkipped, w.wantSkipped) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotSkipped, w.wantSkipped) +// } +// if !reflect.DeepEqual(gotLatest, w.wantLatest) { +// return 
errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatest, w.wantLatest) +// } +// if !reflect.DeepEqual(gotLatestAgent, w.wantLatestAgent) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatestAgent, w.wantLatestAgent) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// originAddr:"", +// id:"", +// replicas:nil, +// counts:nil, +// ts:0, +// start:time.Time{}, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// originAddr:"", +// id:"", +// replicas:nil, +// counts:nil, +// ts:0, +// start:time.Time{}, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// gotFound, gotSkipped, gotLatest, gotLatestAgent, err := c.loadReplicaInfo( +// test.args.ctx, +// test.args.originAddr, +// test.args.id, +// test.args.replicas, +// test.args.counts, +// test.args.ts, +// test.args.start, +// ) +// if err := checkFunc(test.want, gotFound, gotSkipped, gotLatest, gotLatestAgent, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_getLatestObject(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// addr string +// latestAgent string +// latest int64 +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount 
atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// wantLatestObject *payload.Object_Vector +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, *payload.Object_Vector) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotLatestObject *payload.Object_Vector) error { +// if !reflect.DeepEqual(gotLatestObject, w.wantLatestObject) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotLatestObject, w.wantLatestObject) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// addr:"", +// latestAgent:"", +// latest:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// addr:"", +// latestAgent:"", +// latest:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// gotLatestObject := c.getLatestObject(test.args.ctx, test.args.id, test.args.addr, test.args.latestAgent, test.args.latest) +// if err := checkFunc(test.want, gotLatestObject); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctTimestamp(t *testing.T) { +// type args struct { +// ctx 
context.Context +// id string +// latestObject *payload.Object_Vector +// found map[string]*payload.Object_Timestamp +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct{} +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// latestObject:nil, +// found:nil, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// latestObject:nil, +// found:nil, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// c.correctTimestamp(test.args.ctx, test.args.id, test.args.latestObject, test.args.found) +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctOversupply(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// selfAddr 
string +// debugMsg string +// found map[string]*payload.Object_Timestamp +// diff int +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// err := c.correctOversupply(test.args.ctx, test.args.id, test.args.selfAddr, test.args.debugMsg, test.args.found, test.args.diff) +// if err := 
checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_correct_correctShortage(t *testing.T) { +// type args struct { +// ctx context.Context +// id string +// selfAddr string +// debugMsg string +// latestObject *payload.Object_Vector +// found map[string]*payload.Object_Timestamp +// diff int +// } +// type fields struct { +// eg errgroup.Group +// discoverer discoverer.Client +// gateway vc.Client +// checkedList pogreb.DB +// checkedIndexCount atomic.Uint64 +// correctedOldIndexCount atomic.Uint64 +// correctedReplicationCount atomic.Uint64 +// indexReplica int +// streamListConcurrency int +// backgroundSyncInterval time.Duration +// backgroundCompactionInterval time.Duration +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// latestObject:nil, +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// id:"", +// selfAddr:"", +// debugMsg:"", +// latestObject:nil, +// found:nil, +// diff:0, +// }, +// fields: fields { +// eg:nil, +// discoverer:nil, +// gateway:nil, +// checkedList:nil, +// checkedIndexCount:nil, +// correctedOldIndexCount:nil, +// correctedReplicationCount:nil, +// indexReplica:0, +// streamListConcurrency:0, +// backgroundSyncInterval:nil, +// backgroundCompactionInterval:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &correct{ +// eg: test.fields.eg, +// discoverer: test.fields.discoverer, +// gateway: test.fields.gateway, +// checkedList: test.fields.checkedList, +// checkedIndexCount: test.fields.checkedIndexCount, +// correctedOldIndexCount: test.fields.correctedOldIndexCount, +// correctedReplicationCount: test.fields.correctedReplicationCount, +// indexReplica: test.fields.indexReplica, +// streamListConcurrency: 
test.fields.streamListConcurrency, +// backgroundSyncInterval: test.fields.backgroundSyncInterval, +// backgroundCompactionInterval: test.fields.backgroundCompactionInterval, +// } +// +// err := c.correctShortage(test.args.ctx, test.args.id, test.args.selfAddr, test.args.debugMsg, test.args.latestObject, test.args.found, test.args.diff) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/index/job/correction/usecase/corrector.go b/pkg/index/job/correction/usecase/corrector.go index 0a364ccfbc..7c087470c0 100644 --- a/pkg/index/job/correction/usecase/corrector.go +++ b/pkg/index/job/correction/usecase/corrector.go @@ -16,7 +16,6 @@ package usecase import ( "context" "os" - "slices" "syscall" "time" @@ -86,10 +85,6 @@ func New(cfg *config.Data) (r runner.Runner, err error) { discoverer.WithDiscoverDuration(cfg.Corrector.Discoverer.Duration), discoverer.WithOptions(acOpts...), discoverer.WithNodeName(cfg.Corrector.NodeName), - discoverer.WithOnDiscoverFunc(func(_ context.Context, _ discoverer.Client, addrs []string) error { - slices.Reverse(addrs) - return nil - }), ) if err != nil { return nil, err @@ -185,10 +180,10 @@ func (r *run) Start(ctx context.Context) (<-chan error, error) { } })) - // main groutine to run the job + // main goroutine to run the job r.eg.Go(safety.RecoverFunc(func() (err error) { defer func() { - log.Info("fiding my pid to kill myself") + log.Info("finding my pid to kill myself") p, err := os.FindProcess(os.Getpid()) if err != nil { // using Fatal to avoid this process to be zombie diff --git a/pkg/index/job/creation/service/indexer.go b/pkg/index/job/creation/service/indexer.go index 1fc628265b..4ee0dd2508 100644 --- a/pkg/index/job/creation/service/indexer.go +++ b/pkg/index/job/creation/service/indexer.go @@ -175,7 +175,7 @@ func (idx *index) doCreateIndex( switch { case errors.Is(err, context.Canceled): err = status.WrapWithCanceled( - agent.CreateIndexRPCName+" API canceld", err, + agent.CreateIndexRPCName+" API canceled", err, ) attrs = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): diff --git a/pkg/index/job/creation/usecase/creation.go b/pkg/index/job/creation/usecase/creation.go index cf74be8fa0..bd46b60ac6 100644 --- a/pkg/index/job/creation/usecase/creation.go +++ b/pkg/index/job/creation/usecase/creation.go @@ -208,7 +208,7 @@ func (r *run) Stop(ctx context.Context) (errs error) { return errs } -// PtopStop is a method called after execution of Stop. +// PostStop is a method called after execution of Stop. 
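+// PostStop is a method called after execution of Stop.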
func (*run) PostStop(_ context.Context) error { return nil } diff --git a/pkg/index/job/readreplica/rotate/service/rotator.go b/pkg/index/job/readreplica/rotate/service/rotator.go index 14cf8ca7c6..de18335550 100644 --- a/pkg/index/job/readreplica/rotate/service/rotator.go +++ b/pkg/index/job/readreplica/rotate/service/rotator.go @@ -17,7 +17,6 @@ import ( "context" "fmt" "reflect" - "strings" "time" "github.com/vdaas/vald/internal/errors" @@ -27,6 +26,7 @@ import ( "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/observability/trace" "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/internal/strings" "github.com/vdaas/vald/internal/sync/errgroup" "k8s.io/utils/ptr" ) @@ -198,7 +198,7 @@ func (s *subProcess) createSnapshot( oldSnap = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } newSnap = &k8s.VolumeSnapshot{ ObjectMeta: k8s.ObjectMeta{ @@ -244,7 +244,7 @@ func (s *subProcess) createPVC( oldPvc = cur.DeepCopy() newNameBase := getNewBaseName(cur.GetObjectMeta().GetName()) if newNameBase == "" { - return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replicaid", cur.GetObjectMeta().GetName()) + return nil, nil, fmt.Errorf("the name(%s) doesn't seem to have replica id", cur.GetObjectMeta().GetName()) } // remove timestamp from old pvc name diff --git a/pkg/index/job/readreplica/rotate/usecase/rotate.go b/pkg/index/job/readreplica/rotate/usecase/rotate.go index 8ce19ef1db..f5cba4b246 100644 --- a/pkg/index/job/readreplica/rotate/usecase/rotate.go +++ b/pkg/index/job/readreplica/rotate/usecase/rotate.go @@ -165,7 +165,7 @@ func (r *run) Stop(ctx context.Context) (errs error) { return errs } -// PtopStop is a method called after execution of Stop. +// PostStop is a method called after execution of Stop. func (*run) PostStop(_ context.Context) error { return nil } diff --git a/pkg/index/job/save/service/indexer.go b/pkg/index/job/save/service/indexer.go index 1bf5af6f0c..5b14a16676 100644 --- a/pkg/index/job/save/service/indexer.go +++ b/pkg/index/job/save/service/indexer.go @@ -171,7 +171,7 @@ func (idx *index) doSaveIndex( switch { case errors.Is(err, context.Canceled): err = status.WrapWithCanceled( - agent.SaveIndexRPCName+" API canceld", err, + agent.SaveIndexRPCName+" API canceled", err, ) attrs = trace.StatusCodeCancelled(err.Error()) case errors.Is(err, context.DeadlineExceeded): diff --git a/pkg/index/job/save/service/indexer_test.go b/pkg/index/job/save/service/indexer_test.go index 34edd265fe..475f48012a 100644 --- a/pkg/index/job/save/service/indexer_test.go +++ b/pkg/index/job/save/service/indexer_test.go @@ -53,7 +53,7 @@ func Test_index_Start(t *testing.T) { } defaultCheckFunc := func(w want, err error) error { if !errors.Is(err, w.err) { - return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) + return errors.Errorf("got_error: \"%s\",\n\t\t\t\twant: \"%s\"", err.Error(), w.err.Error()) } return nil } diff --git a/pkg/index/job/save/usecase/save.go b/pkg/index/job/save/usecase/save.go index d1cd662989..9eff4e56c6 100644 --- a/pkg/index/job/save/usecase/save.go +++ b/pkg/index/job/save/usecase/save.go @@ -207,7 +207,7 @@ func (r *run) Stop(ctx context.Context) (errs error) { return errs } -// PtopStop is a method called after execution of Stop. 
+// PostStop is a method called after execution of Stop. func (*run) PostStop(_ context.Context) error { return nil } diff --git a/pkg/index/operator/service/operator.go b/pkg/index/operator/service/operator.go index 42c31f9910..0f8edac603 100644 --- a/pkg/index/operator/service/operator.go +++ b/pkg/index/operator/service/operator.go @@ -309,11 +309,11 @@ func (o *operator) ensureJobConcurrency( } for _, job := range jobList.Items { - annotaions := job.Spec.Template.Annotations - if annotaions == nil { + annotations := job.Spec.Template.Annotations + if annotations == nil { continue } - id, ok := annotaions[o.targetReadReplicaIDAnnotationsKey] + id, ok := annotations[o.targetReadReplicaIDAnnotationsKey] if !ok { continue } diff --git a/pkg/index/operator/usecase/operator.go b/pkg/index/operator/usecase/operator.go index 232bc73b0d..ab85510a96 100644 --- a/pkg/index/operator/usecase/operator.go +++ b/pkg/index/operator/usecase/operator.go @@ -155,7 +155,7 @@ func (r *run) Stop(ctx context.Context) (errs error) { return errs } -// PtopStop is a method called after execution of Stop. +// PostStop is a method called after execution of Stop. func (*run) PostStop(_ context.Context) error { return nil } diff --git a/pkg/manager/index/service/indexer.go b/pkg/manager/index/service/indexer.go index 03f4037e1f..e0eb87c4e6 100644 --- a/pkg/manager/index/service/indexer.go +++ b/pkg/manager/index/service/indexer.go @@ -24,10 +24,10 @@ import ( "sync/atomic" "time" - agent "github.com/vdaas/vald/apis/grpc/v1/agent/core" "github.com/vdaas/vald/apis/grpc/v1/payload" - vald "github.com/vdaas/vald/apis/grpc/v1/vald" + agent "github.com/vdaas/vald/internal/client/v1/client/agent/core" "github.com/vdaas/vald/internal/client/v1/client/discoverer" + vald "github.com/vdaas/vald/internal/client/v1/client/vald" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" @@ -56,7 +56,7 @@ type index struct { indexDurationLimit time.Duration saveIndexDuration time.Duration saveIndexDurationLimit time.Duration - shouldSaveList sync.Map[string, struct{}] + shouldSaveList sync.Map[string, bool] createIndexConcurrency int saveIndexConcurrency int indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -67,8 +67,6 @@ type index struct { uncommittedUUIDsCount uint32 } -var empty = struct{}{} - func New(opts ...Option) (idx Indexer, err error) { i := new(index) for _, opt := range append(defaultOptions, opts...) 
{ @@ -243,7 +241,7 @@ func (idx *index) createIndex(ctx context.Context, enableLowIndexSkip bool) (err log.Warnf("an error occurred while calling CreateIndex of %s: %s", addr, err) return err } - _, ok = idx.shouldSaveList.LoadOrStore(addr, empty) + _, ok = idx.shouldSaveList.LoadOrStore(addr, true) if ok { log.Debugf("addr %s already queued for saveIndex", addr) return nil @@ -278,10 +276,10 @@ func (idx *index) saveIndex(ctx context.Context, force bool) (err error) { if err != nil { st, ok := status.FromError(err) if ok && st != nil && st.Code() == codes.FailedPrecondition { - log.Debugf("CreateIndex of %s skipped, message: %s, err: %v", addr, st.Message(), errors.Join(st.Err(), err)) + log.Debugf("SaveIndex of %s skipped, message: %s, err: %v", addr, st.Message(), errors.Join(st.Err(), err)) return nil } - log.Warnf("an error occurred while calling CreateIndex of %s: %s", addr, err) + log.Warnf("an error occurred while calling SaveIndex of %s: %s", addr, err) return err } return nil diff --git a/pkg/manager/index/service/indexer_test.go b/pkg/manager/index/service/indexer_test.go index f2b9ea4ed3..714f40345b 100644 --- a/pkg/manager/index/service/indexer_test.go +++ b/pkg/manager/index/service/indexer_test.go @@ -120,7 +120,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -283,7 +283,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -444,7 +444,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -604,7 +604,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -759,7 +759,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -907,7 +907,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -1055,7 +1055,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList 
sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -1203,7 +1203,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] @@ -1351,7 +1351,7 @@ package service // indexDurationLimit time.Duration // saveIndexDuration time.Duration // saveIndexDurationLimit time.Duration -// shouldSaveList sync.Map[string, struct{}] +// shouldSaveList sync.Map[string, bool] // createIndexConcurrency int // saveIndexConcurrency int // indexInfos sync.Map[string, *payload.Info_Index_Count] diff --git a/pkg/tools/benchmark/job/config/config.go b/pkg/tools/benchmark/job/config/config.go index c06f8e10cc..7822d94b05 100644 --- a/pkg/tools/benchmark/job/config/config.go +++ b/pkg/tools/benchmark/job/config/config.go @@ -112,7 +112,7 @@ func NewConfig(ctx context.Context, path string) (cfg *Config, err error) { if jobResource.Spec.ServerConfig != nil { overrideCfg.Server = (*jobResource.Spec.ServerConfig).Bind() } - // jobResource.Spec has another field comparering Config.Job, so json.Marshal and Unmarshal are used for embedding field value of Config.Job from jobResource.Spec + // jobResource.Spec has additional fields compared to Config.Job, so json.Marshal and Unmarshal are used to embed the field values of Config.Job from jobResource.Spec var overrideJobCfg config.BenchmarkJob b, err := json.Marshal(*jobResource.Spec.DeepCopy()) if err == nil { diff --git a/pkg/tools/benchmark/operator/service/operator.go b/pkg/tools/benchmark/operator/service/operator.go index 62cf1e2a44..c8285ec3cd 100644 --- a/pkg/tools/benchmark/operator/service/operator.go +++ b/pkg/tools/benchmark/operator/service/operator.go @@ -193,8 +193,8 @@ func (o *operator) jobReconcile(ctx context.Context, jobList map[string][]k8s.Jo } // benchmarkJobStatus is used for update benchmark job resource status benchmarkJobStatus := make(map[string]v1.BenchmarkJobStatus) - // jobNames is used for check whether cjobs has delted job. - // If cjobs has the delted job, it will be remove the end of jobReconcile function. + // jobNames is used to check whether cjobs has a deleted job. + // If cjobs has a deleted job, it will be removed at the end of the jobReconcile function. jobNames := map[string]struct{}{} for _, jobs := range jobList { cnt := len(jobs) diff --git a/pkg/tools/benchmark/operator/service/operator_test.go b/pkg/tools/benchmark/operator/service/operator_test.go index ba8e387f5c..8da05ed552 100644 --- a/pkg/tools/benchmark/operator/service/operator_test.go +++ b/pkg/tools/benchmark/operator/service/operator_test.go @@ -2911,7 +2911,7 @@ func Test_operator_checkAtomics(t *testing.T) { tests := []test{ func() test { return test{ - name: "return nil with no mismatch atmoics", + name: "return nil with no mismatch atomics", fields: fields{ scenarios: func() *atomic.Pointer[map[string]*scenario] { ap := atomic.Pointer[map[string]*scenario]{} diff --git a/rust/Cargo.lock b/rust/Cargo.lock index c173be0fff..d958880694 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "addr2line" @@ -22,19 +22,79 @@ name = "agent" version = "0.1.0" dependencies = [ "algorithm", + "anyhow", + "cargo", "prost 0.13.4", + "prost-types", "proto", "tokio", "tokio-stream", "tonic 0.12.3", + "tonic-types", +] + +[[package]] +name = "ahash" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +dependencies = [ + "cfg-if", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +dependencies = [ + "memchr", ] [[package]] name = "algorithm" version = "0.1.0" dependencies = [ + "anyhow", "faiss", "ngt", + "proto", + "qbg", + "tonic 0.12.3", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "annotate-snippets" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "710e8eae58854cdc1790fcb56cca04d712a17be849eeb81da2a724bf4bae2bc4" +dependencies = [ + "anstyle", + "unicode-width 0.2.0", +] + +[[package]] +name = "anstream" +version = "0.6.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8acc5369981196006228e28809f761875c0327210a891e941f4c683b3a99529b" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "is_terminal_polyfill", + "utf8parse", ] [[package]] @@ -43,11 +103,45 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "55cc3b69f167a1ef2e161439aa98aed94e6028e5f9a59be9a6ffb47aef1651f9" +[[package]] +name = "anstyle-parse" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b2d16507662817a6a20a9ea92df6652ee4f94f914589377d69f3b21bc5798a9" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "79947af37f4177cfead1110013d678905c37501914fba0efea834c3fe9a8d60c" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2109dbce0e72be3ec00bed26e6a7479ca384ad226efdd66db8fa2e3a38c83125" +dependencies = [ + "anstyle", + "windows-sys 0.59.0", +] + [[package]] name = "anyhow" -version = "1.0.95" +version = "1.0.94" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" + +[[package]] +name = "arc-swap" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "async-stream" @@ -73,9 +167,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.85" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f934833b4b7233644e5848f235df3f57ed8c80f1528a26c3dfa13d2147fa056" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ 
"proc-macro2", "quote", @@ -210,6 +304,12 @@ dependencies = [ "backtrace", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.21.7" @@ -222,6 +322,12 @@ version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" +[[package]] +name = "base64ct" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" + [[package]] name = "bitflags" version = "1.3.2" @@ -230,9 +336,38 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.8.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f68f53c83ab957f72c32642f3868eec03eb974d1fb82e453128456482613d36" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] +name = "bitmaps" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" +dependencies = [ + "typenum", +] + +[[package]] +name = "block-buffer" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" +dependencies = [ + "generic-array", +] + +[[package]] +name = "bstr" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "786a307d683a5bf92e6fd5fd69a7eb613751668d1d8d67d802846dfe367c62c8" +dependencies = [ + "memchr", + "regex-automata 0.4.9", + "serde", +] [[package]] name = "bumpalo" @@ -252,12 +387,191 @@ version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "325918d6fe32f23b19878fe4b34794ae41fc19ddbe53b10571a4874d44ffd39b" +[[package]] +name = "bytesize" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3e368af43e418a04d52505cf3dbc23dda4e3407ae2fa99fd0e4f308ce546acc" + +[[package]] +name = "cargo" +version = "0.81.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f24c9dcadcdad2f6fa2553b63d5e9c9700fa6932b75d53f3b11b8aea35ebab99" +dependencies = [ + "annotate-snippets", + "anstream", + "anstyle", + "anyhow", + "base64 0.22.1", + "bytesize", + "cargo-credential", + "cargo-credential-libsecret", + "cargo-credential-macos-keychain", + "cargo-credential-wincred", + "cargo-platform", + "cargo-util", + "cargo-util-schemas", + "clap", + "color-print", + "crates-io", + "curl", + "curl-sys", + "filetime", + "flate2", + "git2", + "git2-curl", + "gix", + "glob", + "hex", + "hmac", + "home", + "http-auth", + "humantime", + "ignore", + "im-rc", + "indexmap 2.7.0", + "itertools 0.12.1", + "jobserver", + "lazycell", + "libc", + "libgit2-sys", + "memchr", + "opener", + "os_info", + "pasetors", + "pathdiff", + "rand", + "regex", + "rusqlite", + "rustfix", + "same-file", + "semver", + "serde", + "serde-untagged", + "serde_ignored", + "serde_json", + "sha1", + "shell-escape", + "supports-hyperlinks", + "supports-unicode", + "tar", + "tempfile", + "time", + "toml", + "toml_edit", + "tracing", + "tracing-chrome", + "tracing-subscriber", + "unicase", + "unicode-width 0.1.14", + "url", + "walkdir", + 
"windows-sys 0.52.0", +] + +[[package]] +name = "cargo-credential" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8951a7e8159904939f036c967e24b60636efd6a6f22379af6b32afc6b867a2b8" +dependencies = [ + "anyhow", + "libc", + "serde", + "serde_json", + "thiserror 1.0.69", + "time", + "windows-sys 0.59.0", +] + +[[package]] +name = "cargo-credential-libsecret" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9327125ccc2cf986f111a3342fe0a431702017a85f1b68831ebbf4331519d27d" +dependencies = [ + "anyhow", + "cargo-credential", + "libloading", +] + +[[package]] +name = "cargo-credential-macos-keychain" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d331053cb9e1d39bcbb2bf3b9979bad399e6f630f54c29fd64396a57887fb281" +dependencies = [ + "cargo-credential", + "security-framework", +] + +[[package]] +name = "cargo-credential-wincred" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97741d5bcaf92bc5cfa9678dd4bfb2a32d5109626ce8186fbcc6a3ea811eacaa" +dependencies = [ + "cargo-credential", + "windows-sys 0.59.0", +] + +[[package]] +name = "cargo-platform" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e35af189006b9c0f00a064685c727031e3ed2d8020f7ba284d78cc2671bd36ea" +dependencies = [ + "serde", +] + +[[package]] +name = "cargo-util" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b15bbe49616ee353fadadf6de5a24136f3fe8fdbd5eb0894be9f8a42c905674" +dependencies = [ + "anyhow", + "core-foundation 0.10.0", + "filetime", + "hex", + "ignore", + "jobserver", + "libc", + "miow", + "same-file", + "sha2", + "shell-escape", + "tempfile", + "tracing", + "walkdir", + "windows-sys 0.59.0", +] + +[[package]] +name = "cargo-util-schemas" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "34ddc7fc157e3dbbd88f05ef8be7c3ed3ecb05925a3f51f716d6103a607fb7c4" +dependencies = [ + "semver", + "serde", + "serde-untagged", + "serde-value", + "thiserror 1.0.69", + "toml", + "unicode-xid", + "url", +] + [[package]] name = "cc" -version = "1.2.9" +version = "1.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8293772165d9345bdaaa39b45b2109591e63fe5e6fbc23c6ff930a048aa310b" +checksum = "c31a0499c1dc64f458ad13872de75c0eb7e3fdb0e67964610c914b034fc5956e" dependencies = [ + "jobserver", + "libc", "shlex", ] @@ -269,22 +583,24 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "clap" -version = "4.5.26" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8eb5e908ef3a6efbe1ed62520fb7287959888c88485abe072543190ecc66783" +checksum = "3135e7ec2ef7b10c6ed8950f0f792ed96ee093fa088608f1c76e569722700c84" dependencies = [ "clap_builder", ] [[package]] name = "clap_builder" -version = "4.5.26" +version = "4.5.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b01801b5fc6a0a232407abc821660c9c6d25a1cafc0d4f85f29fb8d9afc121" +checksum = "30582fc632330df2bd26877bde0c1f4470d57c582bbc070376afcd04d8cb4838" dependencies = [ + "anstream", "anstyle", "clap_lex", "strsim", + "terminal_size", ] [[package]] @@ -293,6 +609,12 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f46ad14479a25103f283c0f10005961cf086d8dc42205bb44c46ac563475dca6" +[[package]] +name = "clru" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cbd0f76e066e64fdc5631e3bb46381254deab9ef1158292f27c8c57e3bf3fe59" + [[package]] name = "codespan-reporting" version = "0.11.1" @@ -300,9 +622,42 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3538270d33cc669650c4b093848450d380def10c331d38c768e34cac80576e6e" dependencies = [ "termcolor", - "unicode-width", + "unicode-width 0.1.14", +] + +[[package]] +name = "color-print" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3aa954171903797d5623e047d9ab69d91b493657917bdfb8c2c80ecaf9cdb6f4" +dependencies = [ + "color-print-proc-macro", ] +[[package]] +name = "color-print-proc-macro" +version = "0.3.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "692186b5ebe54007e45a59aea47ece9eb4108e141326c304cdc91699a7118a22" +dependencies = [ + "nom", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "colorchoice" +version = "1.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b63caa9aa9397e2d9480a9b13673856c78d8ac123288526c37d7839f2a86990" + +[[package]] +name = "const-oid" +version = "0.9.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" + [[package]] name = "core-foundation" version = "0.9.4" @@ -313,12 +668,45 @@ dependencies = [ "libc", ] +[[package]] +name = "core-foundation" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b55271e5c8c478ad3f38ad24ef34923091e0548492a266d19b3c0b4d82574c63" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "core-foundation-sys" version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" +[[package]] +name = "cpufeatures" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16b80225097f2e5ae4e7179dd2266824648f3e2f49d9134d584b76389d31c4c3" +dependencies = [ + "libc", +] + +[[package]] +name = "crates-io" +version = "0.40.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6cee4e56fd9f9c23d0654bdcf8c6ad1943b66743b34baeb852fe5318d255c8d0" +dependencies = [ + "curl", + "percent-encoding", + "serde", + "serde_json", + "thiserror 1.0.69", + "url", +] + [[package]] name = "crc32fast" version = "1.4.2" @@ -328,6 +716,25 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06ba6d68e24814cb8de6bb986db8222d3a027d15872cabc0d18817bc3c0e4471" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-deque" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9dd111b7b7f7d55b72c0a6ae361660ee5853c9af73f70c3c2ef6858b950e2e51" +dependencies = [ + "crossbeam-epoch", + "crossbeam-utils", +] + [[package]] name = "crossbeam-epoch" version = "0.9.18" @@ -344,57 +751,116 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] -name = "cxx" -version = "1.0.136" +name = "crypto-bigint" +version = "0.5.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad7c7515609502d316ab9a24f67dc045132d93bfd3f00713389e90d9898bf30d" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ - "cc", - "cxxbridge-cmd", - "cxxbridge-flags", - "cxxbridge-macro", - "foldhash", - "link-cplusplus", + "generic-array", + "rand_core", + "subtle", + "zeroize", ] [[package]] -name = "cxx-build" -version = "1.0.136" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bfd16fca6fd420aebbd80d643c201ee4692114a0de208b790b9cd02ceae65fb" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "cc", - "codespan-reporting", - "proc-macro2", - "quote", - "scratch", - "syn", + "generic-array", + "typenum", ] [[package]] -name = "cxxbridge-cmd" -version = "1.0.136" +name = "ct-codecs" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c33fd49f5d956a1b7ee5f7a9768d58580c6752838d92e39d0d56439efdedc35" -dependencies = [ - "clap", - "codespan-reporting", - "proc-macro2", - "quote", - "syn", -] +checksum = "b916ba8ce9e4182696896f015e8a5ae6081b305f74690baa8465e35f5a142ea4" [[package]] -name = "cxxbridge-flags" -version = "1.0.136" +name = "curl" +version = "0.4.47" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be0f1077278fac36299cce8446effd19fe93a95eedb10d39265f3bf67b3036c9" - -[[package]] +checksum = "d9fb4d13a1be2b58f14d60adba57c9834b78c62fd86c3e76a148f732686e9265" +dependencies = [ + "curl-sys", + "libc", + "openssl-probe", + "openssl-sys", + "schannel", + "socket2", + "windows-sys 0.52.0", +] + +[[package]] +name = "curl-sys" +version = "0.4.78+curl-8.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8eec768341c5c7789611ae51cf6c459099f22e64a5d5d0ce4892434e33821eaf" +dependencies = [ + "cc", + "libc", + "libnghttp2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", + "windows-sys 0.52.0", +] + +[[package]] +name = "cxx" +version = "1.0.135" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4d44ff199ff93242c3afe480ab588d544dd08d72e92885e152ffebc670f076ad" +dependencies = [ + "cc", + "cxxbridge-cmd", + "cxxbridge-flags", + "cxxbridge-macro", + "foldhash", + "link-cplusplus", +] + +[[package]] +name = "cxx-build" +version = "1.0.135" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66fd8f17ad454fc1e4f4ab83abffcc88a532e90350d3ffddcb73030220fcbd52" +dependencies = [ + "cc", + "codespan-reporting", + "proc-macro2", + "quote", + "scratch", + "syn", +] + +[[package]] +name = "cxxbridge-cmd" +version = "1.0.135" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4717c9c806a9e07fdcb34c84965a414ea40fafe57667187052cf1eb7f5e8a8a9" +dependencies = [ + "clap", + "codespan-reporting", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "cxxbridge-flags" +version = "1.0.135" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2f6515329bf3d98f4073101c7866ff2bec4e635a13acb82e3f3753fff0bf43cb" + +[[package]] name = "cxxbridge-macro" -version = "1.0.136" +version = "1.0.135" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3da7e4d6e74af6b79031d264b2f13c3ea70af1978083741c41ffce9308f1f24f" +checksum = "fb93e6a7ce8ec985c02bbb758237a31598b340acbbc3c19c5a4fa6adaaac92ab" dependencies = [ "proc-macro2", "quote", @@ 
-403,10 +869,48 @@ dependencies = [ ] [[package]] -name = "defer" -version = "0.2.1" +name = "dbus" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "930c7171c8df9fb1782bdf9b918ed9ed2d33d1d22300abb754f9085bc48bf8e8" +checksum = "1bb21987b9fb1613058ba3843121dd18b163b254d8a6e797e144cbac14d96d1b" +dependencies = [ + "libc", + "libdbus-sys", + "winapi", +] + +[[package]] +name = "der" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" +dependencies = [ + "const-oid", + "pem-rfc7468", + "zeroize", +] + +[[package]] +name = "deranged" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" +dependencies = [ + "powerfmt", + "serde", +] + +[[package]] +name = "digest" +version = "0.10.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" +dependencies = [ + "block-buffer", + "const-oid", + "crypto-common", + "subtle", +] [[package]] name = "displaydoc" @@ -419,12 +923,62 @@ dependencies = [ "syn", ] +[[package]] +name = "dunce" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + +[[package]] +name = "ed25519-compact" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e9b3460f44bea8cd47f45a0c70892f1eff856d97cd55358b2f73f663789f6190" +dependencies = [ + "getrandom", +] + [[package]] name = "either" version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "hkdf", + "pem-rfc7468", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "encoding_rs" version = "0.8.35" @@ -440,6 +994,16 @@ version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +[[package]] +name = "erased-serde" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24e2389d65ab4fab27dc2a5de7b191e1f6617d1f1c8855c0dc569c94a4cbb18d" +dependencies = [ + "serde", + "typeid", +] + [[package]] name = "errno" version = "0.3.10" @@ -454,6 +1018,69 @@ dependencies = [ name = "faiss" version = "0.1.0" +[[package]] +name = "fallible-iterator" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" + +[[package]] +name = "fallible-streaming-iterator" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" + +[[package]] +name = "faster-hex" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2a2b11eda1d40935b26cf18f6833c526845ae8c41e58d09af6adeb6f0269183" + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + +[[package]] +name = "fiat-crypto" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" + +[[package]] +name = "filetime" +version = "0.2.25" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35c0522e981e68cbfa8c3f978441a5f34b30b96e146b33cd3359176b50fe8586" +dependencies = [ + "cfg-if", + "libc", + "libredox", + "windows-sys 0.59.0", +] + +[[package]] +name = "flate2" +version = "1.0.35" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c936bfdafb507ebbf50b8074c54fa31c5be9a1e7e5f467dd659697041407d07c" +dependencies = [ + "crc32fast", + "libz-sys", + "miniz_oxide", +] + [[package]] name = "fnv" version = "1.0.7" @@ -461,133 +1088,953 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] -name = "foldhash" -version = "0.1.4" +name = "foldhash" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" + +[[package]] +name = "form_urlencoded" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures-channel" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +dependencies = [ + "futures-core", +] + +[[package]] +name = "futures-core" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + +[[package]] +name = "futures-executor" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + +[[package]] +name = "futures-macro" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "futures-sink" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + 
+[[package]] +name = "futures-task" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + +[[package]] +name = "futures-util" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +dependencies = [ + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "pin-utils", + "slab", +] + +[[package]] +name = "generic-array" +version = "0.14.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" +dependencies = [ + "typenum", + "version_check", + "zeroize", +] + +[[package]] +name = "getrandom" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "wasi", + "wasm-bindgen", +] + +[[package]] +name = "gimli" +version = "0.31.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" + +[[package]] +name = "git2" +version = "0.18.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" +dependencies = [ + "bitflags 2.6.0", + "libc", + "libgit2-sys", + "log", + "openssl-probe", + "openssl-sys", + "url", +] + +[[package]] +name = "git2-curl" +version = "0.19.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78e26b61608c573ffd26fc79061a823aa5147449a1afe1f61679a21e2031f7c3" +dependencies = [ + "curl", + "git2", + "log", + "url", +] + +[[package]] +name = "gix" +version = "0.63.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "984c5018adfa7a4536ade67990b3ebc6e11ab57b3d6cd9968de0947ca99b4b06" +dependencies = [ + "gix-actor", + "gix-attributes", + "gix-command", + "gix-commitgraph", + "gix-config", + "gix-credentials", + "gix-date 0.8.7", + "gix-diff", + "gix-dir", + "gix-discover", + "gix-features", + "gix-filter", + "gix-fs", + "gix-glob", + "gix-hash", + "gix-hashtable", + "gix-ignore", + "gix-index", + "gix-lock", + "gix-macros", + "gix-negotiate", + "gix-object", + "gix-odb", + "gix-pack", + "gix-path", + "gix-pathspec", + "gix-prompt", + "gix-protocol", + "gix-ref", + "gix-refspec", + "gix-revision", + "gix-revwalk", + "gix-sec", + "gix-submodule", + "gix-tempfile", + "gix-trace", + "gix-transport", + "gix-traverse", + "gix-url", + "gix-utils", + "gix-validate", + "gix-worktree", + "once_cell", + "parking_lot", + "prodash", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-actor" +version = "0.31.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a0e454357e34b833cc3a00b6efbbd3dd4d18b24b9fb0c023876ec2645e8aa3f2" +dependencies = [ + "bstr", + "gix-date 0.8.7", + "gix-utils", + "itoa", + "thiserror 1.0.69", + "winnow", +] + +[[package]] +name = "gix-attributes" +version = "0.22.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebccbf25aa4a973dd352564a9000af69edca90623e8a16dad9cbc03713131311" +dependencies = [ + "bstr", + "gix-glob", + "gix-path", + "gix-quote", + "gix-trace", + "kstring", + "smallvec", + "thiserror 1.0.69", + "unicode-bom", +] + 
+[[package]] +name = "gix-bitmap" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d48b897b4bbc881aea994b4a5bbb340a04979d7be9089791304e04a9fbc66b53" +dependencies = [ + "thiserror 2.0.8", +] + +[[package]] +name = "gix-chunk" +version = "0.4.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6ffbeb3a5c0b8b84c3fe4133a6f8c82fa962f4caefe8d0762eced025d3eb4f7" +dependencies = [ + "thiserror 2.0.8", +] + +[[package]] +name = "gix-command" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d7d6b8f3a64453fd7e8191eb80b351eb7ac0839b40a1237cd2c137d5079fe53" +dependencies = [ + "bstr", + "gix-path", + "gix-trace", + "shell-words", +] + +[[package]] +name = "gix-commitgraph" +version = "0.24.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "133b06f67f565836ec0c473e2116a60fb74f80b6435e21d88013ac0e3c60fc78" +dependencies = [ + "bstr", + "gix-chunk", + "gix-features", + "gix-hash", + "memmap2", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-config" +version = "0.37.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53fafe42957e11d98e354a66b6bd70aeea00faf2f62dd11164188224a507c840" +dependencies = [ + "bstr", + "gix-config-value", + "gix-features", + "gix-glob", + "gix-path", + "gix-ref", + "gix-sec", + "memchr", + "once_cell", + "smallvec", + "thiserror 1.0.69", + "unicode-bom", + "winnow", +] + +[[package]] +name = "gix-config-value" +version = "0.14.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49aaeef5d98390a3bcf9dbc6440b520b793d1bf3ed99317dc407b02be995b28e" +dependencies = [ + "bitflags 2.6.0", + "bstr", + "gix-path", + "libc", + "thiserror 2.0.8", +] + +[[package]] +name = "gix-credentials" +version = "0.24.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8ce391d305968782f1ae301c4a3d42c5701df7ff1d8bc03740300f6fd12bce78" +dependencies = [ + "bstr", + "gix-command", + "gix-config-value", + "gix-path", + "gix-prompt", + "gix-sec", + "gix-trace", + "gix-url", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-date" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9eed6931f21491ee0aeb922751bd7ec97b4b2fe8fbfedcb678e2a2dce5f3b8c0" +dependencies = [ + "bstr", + "itoa", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "gix-date" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "691142b1a34d18e8ed6e6114bc1a2736516c5ad60ef3aa9bd1b694886e3ca92d" +dependencies = [ + "bstr", + "itoa", + "jiff", + "thiserror 2.0.8", +] + +[[package]] +name = "gix-diff" +version = "0.44.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1996d5c8a305b59709467d80617c9fde48d9d75fd1f4179ea970912630886c9d" +dependencies = [ + "bstr", + "gix-hash", + "gix-object", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-dir" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60c99f8c545abd63abe541d20ab6cda347de406c0a3f1c80aadc12d9b0e94974" +dependencies = [ + "bstr", + "gix-discover", + "gix-fs", + "gix-ignore", + "gix-index", + "gix-object", + "gix-path", + "gix-pathspec", + "gix-trace", + "gix-utils", + "gix-worktree", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-discover" +version = "0.32.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"fc27c699b63da66b50d50c00668bc0b7e90c3a382ef302865e891559935f3dbf" +dependencies = [ + "bstr", + "dunce", + "gix-fs", + "gix-hash", + "gix-path", + "gix-ref", + "gix-sec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-features" +version = "0.38.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac7045ac9fe5f9c727f38799d002a7ed3583cd777e3322a7c4b43e3cf437dc69" +dependencies = [ + "bytes", + "crc32fast", + "crossbeam-channel", + "flate2", + "gix-hash", + "gix-trace", + "gix-utils", + "libc", + "once_cell", + "parking_lot", + "prodash", + "sha1_smol", + "thiserror 1.0.69", + "walkdir", +] + +[[package]] +name = "gix-filter" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6547738da28275f4dff4e9f3a0f28509f53f94dd6bd822733c91cb306bca61a" +dependencies = [ + "bstr", + "encoding_rs", + "gix-attributes", + "gix-command", + "gix-hash", + "gix-object", + "gix-packetline-blocking", + "gix-path", + "gix-quote", + "gix-trace", + "gix-utils", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-fs" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2bfe6249cfea6d0c0e0990d5226a4cb36f030444ba9e35e0639275db8f98575" +dependencies = [ + "fastrand", + "gix-features", + "gix-utils", +] + +[[package]] +name = "gix-glob" +version = "0.16.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "74908b4bbc0a0a40852737e5d7889f676f081e340d5451a16e5b4c50d592f111" +dependencies = [ + "bitflags 2.6.0", + "bstr", + "gix-features", + "gix-path", +] + +[[package]] +name = "gix-hash" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93d7df7366121b5018f947a04d37f034717e113dcf9ccd85c34b58e57a74d5e" +dependencies = [ + "faster-hex", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-hashtable" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7ddf80e16f3c19ac06ce415a38b8591993d3f73aede049cb561becb5b3a8e242" +dependencies = [ + "gix-hash", + "hashbrown 0.14.5", + "parking_lot", +] + +[[package]] +name = "gix-ignore" +version = "0.11.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e447cd96598460f5906a0f6c75e950a39f98c2705fc755ad2f2020c9e937fab7" +dependencies = [ + "bstr", + "gix-glob", + "gix-path", + "gix-trace", + "unicode-bom", +] + +[[package]] +name = "gix-index" +version = "0.33.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a9a44eb55bd84bb48f8a44980e951968ced21e171b22d115d1cdcef82a7d73f" +dependencies = [ + "bitflags 2.6.0", + "bstr", + "filetime", + "fnv", + "gix-bitmap", + "gix-features", + "gix-fs", + "gix-hash", + "gix-lock", + "gix-object", + "gix-traverse", + "gix-utils", + "gix-validate", + "hashbrown 0.14.5", + "itoa", + "libc", + "memmap2", + "rustix", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-lock" +version = "14.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bc7fe297f1f4614774989c00ec8b1add59571dc9b024b4c00acb7dedd4e19d" +dependencies = [ + "gix-tempfile", + "gix-utils", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-macros" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "999ce923619f88194171a67fb3e6d613653b8d4d6078b529b15a765da0edcc17" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "gix-negotiate" +version = "0.13.2" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ec879fb6307bb63519ba89be0024c6f61b4b9d61f1a91fd2ce572d89fe9c224" +dependencies = [ + "bitflags 2.6.0", + "gix-commitgraph", + "gix-date 0.8.7", + "gix-hash", + "gix-object", + "gix-revwalk", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-object" +version = "0.42.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "25da2f46b4e7c2fa7b413ce4dffb87f69eaf89c2057e386491f4c55cadbfe386" +dependencies = [ + "bstr", + "gix-actor", + "gix-date 0.8.7", + "gix-features", + "gix-hash", + "gix-utils", + "gix-validate", + "itoa", + "smallvec", + "thiserror 1.0.69", + "winnow", +] + +[[package]] +name = "gix-odb" +version = "0.61.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20d384fe541d93d8a3bb7d5d5ef210780d6df4f50c4e684ccba32665a5e3bc9b" +dependencies = [ + "arc-swap", + "gix-date 0.8.7", + "gix-features", + "gix-fs", + "gix-hash", + "gix-object", + "gix-pack", + "gix-path", + "gix-quote", + "parking_lot", + "tempfile", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-pack" +version = "0.51.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3e0594491fffe55df94ba1c111a6566b7f56b3f8d2e1efc750e77d572f5f5229" +dependencies = [ + "clru", + "gix-chunk", + "gix-features", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-path", + "gix-tempfile", + "memmap2", + "parking_lot", + "smallvec", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-packetline" +version = "0.17.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c43ef4d5fe2fa222c606731c8bdbf4481413ee4ef46d61340ec39e4df4c5e49" +dependencies = [ + "bstr", + "faster-hex", + "gix-trace", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-packetline-blocking" +version = "0.17.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9802304baa798dd6f5ff8008a2b6516d54b74a69ca2d3a2b9e2d6c3b5556b40" +dependencies = [ + "bstr", + "faster-hex", + "gix-trace", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-path" +version = "0.10.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afc292ef1a51e340aeb0e720800338c805975724c1dfbd243185452efd8645b7" +dependencies = [ + "bstr", + "gix-trace", + "home", + "once_cell", + "thiserror 2.0.8", +] + +[[package]] +name = "gix-pathspec" +version = "0.7.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d23bf239532b4414d0e63b8ab3a65481881f7237ed9647bb10c1e3cc54c5ceb" +dependencies = [ + "bitflags 2.6.0", + "bstr", + "gix-attributes", + "gix-config-value", + "gix-glob", + "gix-path", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-prompt" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a7822afc4bc9c5fbbc6ce80b00f41c129306b7685cac3248dbfa14784960594" +dependencies = [ + "gix-command", + "gix-config-value", + "parking_lot", + "rustix", + "thiserror 2.0.8", +] + +[[package]] +name = "gix-protocol" +version = "0.45.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc43a1006f01b5efee22a003928c9eb83dde2f52779ded9d4c0732ad93164e3e" +dependencies = [ + "bstr", + "gix-credentials", + "gix-date 0.9.2", + "gix-features", + "gix-hash", + "gix-transport", + "gix-utils", + "maybe-async", + "thiserror 1.0.69", + "winnow", +] + +[[package]] +name = "gix-quote" +version = "0.4.14" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" +checksum = "64a1e282216ec2ab2816cd57e6ed88f8009e634aec47562883c05ac8a7009a63" +dependencies = [ + "bstr", + "gix-utils", + "thiserror 2.0.8", +] [[package]] -name = "form_urlencoded" -version = "1.2.1" +name = "gix-ref" +version = "0.44.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "3394a2997e5bc6b22ebc1e1a87b41eeefbcfcff3dbfa7c4bd73cb0ac8f1f3e2e" dependencies = [ - "percent-encoding", + "gix-actor", + "gix-date 0.8.7", + "gix-features", + "gix-fs", + "gix-hash", + "gix-lock", + "gix-object", + "gix-path", + "gix-tempfile", + "gix-utils", + "gix-validate", + "memmap2", + "thiserror 1.0.69", + "winnow", ] [[package]] -name = "fs2" -version = "0.4.3" +name = "gix-refspec" +version = "0.23.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9564fc758e15025b46aa6643b1b77d047d1a56a1aea6e01002ac0c7026876213" +checksum = "6868f8cd2e62555d1f7c78b784bece43ace40dd2a462daf3b588d5416e603f37" dependencies = [ - "libc", - "winapi", + "bstr", + "gix-hash", + "gix-revision", + "gix-validate", + "smallvec", + "thiserror 1.0.69", ] [[package]] -name = "futures-channel" -version = "0.3.31" +name = "gix-revision" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "01b13e43c2118c4b0537ddac7d0821ae0dfa90b7b8dbf20c711e153fb749adce" dependencies = [ - "futures-core", + "bstr", + "gix-date 0.8.7", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "gix-trace", + "thiserror 1.0.69", ] [[package]] -name = "futures-core" -version = "0.3.31" +name = "gix-revwalk" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "1b030ccaab71af141f537e0225f19b9e74f25fefdba0372246b844491cab43e0" +dependencies = [ + "gix-commitgraph", + "gix-date 0.8.7", + "gix-hash", + "gix-hashtable", + "gix-object", + "smallvec", + "thiserror 1.0.69", +] [[package]] -name = "futures-executor" -version = "0.3.31" +name = "gix-sec" +version = "0.10.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "a8b876ef997a955397809a2ec398d6a45b7a55b4918f2446344330f778d14fd6" dependencies = [ - "futures-core", - "futures-task", - "futures-util", + "bitflags 2.6.0", + "gix-path", + "libc", + "windows-sys 0.52.0", ] [[package]] -name = "futures-io" -version = "0.3.31" +name = "gix-submodule" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "921cd49924ac14b6611b22e5fb7bbba74d8780dc7ad26153304b64d1272460ac" +dependencies = [ + "bstr", + "gix-config", + "gix-path", + "gix-pathspec", + "gix-refspec", + "gix-url", + "thiserror 1.0.69", +] [[package]] -name = "futures-macro" -version = "0.3.31" +name = "gix-tempfile" +version = "14.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "046b4927969fa816a150a0cda2e62c80016fe11fb3c3184e4dddf4e542f108aa" dependencies = [ - "proc-macro2", - "quote", - 
"syn", + "gix-fs", + "libc", + "once_cell", + "parking_lot", + "tempfile", ] [[package]] -name = "futures-sink" -version = "0.3.31" +name = "gix-trace" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "04bdde120c29f1fc23a24d3e115aeeea3d60d8e65bab92cc5f9d90d9302eb952" [[package]] -name = "futures-task" -version = "0.3.31" +name = "gix-transport" +version = "0.42.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "421dcccab01b41a15d97b226ad97a8f9262295044e34fbd37b10e493b0a6481f" +dependencies = [ + "base64 0.22.1", + "bstr", + "curl", + "gix-command", + "gix-credentials", + "gix-features", + "gix-packetline", + "gix-quote", + "gix-sec", + "gix-url", + "thiserror 1.0.69", +] + +[[package]] +name = "gix-traverse" +version = "0.39.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e499a18c511e71cf4a20413b743b9f5bcf64b3d9e81e9c3c6cd399eae55a8840" +dependencies = [ + "bitflags 2.6.0", + "gix-commitgraph", + "gix-date 0.8.7", + "gix-hash", + "gix-hashtable", + "gix-object", + "gix-revwalk", + "smallvec", + "thiserror 1.0.69", +] [[package]] -name = "futures-util" -version = "0.3.31" +name = "gix-url" +version = "0.27.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "fd280c5e84fb22e128ed2a053a0daeacb6379469be6a85e3d518a0636e160c89" dependencies = [ - "futures-core", - "futures-io", - "futures-macro", - "futures-sink", - "futures-task", - "memchr", - "pin-project-lite", - "pin-utils", - "slab", + "bstr", + "gix-features", + "gix-path", + "home", + "thiserror 1.0.69", + "url", ] [[package]] -name = "fxhash" -version = "0.2.1" +name = "gix-utils" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" +checksum = "ba427e3e9599508ed98a6ddf8ed05493db114564e338e41f6a996d2e4790335f" dependencies = [ - "byteorder", + "bstr", + "fastrand", + "unicode-normalization", ] [[package]] -name = "getrandom" -version = "0.2.15" +name = "gix-validate" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "82c27dd34a49b1addf193c92070bcbf3beaf6e10f16a78544de6372e146a0acf" dependencies = [ - "cfg-if", - "libc", - "wasi", + "bstr", + "thiserror 1.0.69", ] [[package]] -name = "gimli" -version = "0.31.1" +name = "gix-worktree" +version = "0.34.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "26f7326ebe0b9172220694ea69d344c536009a9b98fb0f9de092c440f3efe7a6" +dependencies = [ + "bstr", + "gix-attributes", + "gix-features", + "gix-fs", + "gix-glob", + "gix-hash", + "gix-ignore", + "gix-index", + "gix-object", + "gix-path", + "gix-validate", +] [[package]] name = "glob" -version = "0.3.2" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" + +[[package]] +name = "globset" +version = "0.4.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"15f1ce686646e7f1e19bf7d5533fe443a45dbfb990e00629110797578b42fb19" +dependencies = [ + "aho-corasick", + "bstr", + "log", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "group" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + "ff", + "rand_core", + "subtle", +] [[package]] name = "h2" @@ -633,12 +2080,64 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" +dependencies = [ + "ahash", + "allocator-api2", +] + [[package]] name = "hashbrown" version = "0.15.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +[[package]] +name = "hashlink" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ba4ff7128dee98c7dc9794b6a411377e1404dba1c97deb8d1a55297bd25d8af" +dependencies = [ + "hashbrown 0.14.5", +] + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "hkdf" +version = "0.12.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" +dependencies = [ + "hmac", +] + +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest", +] + +[[package]] +name = "home" +version = "0.5.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589533453244b0995c858700322199b2becb13b627df2851f64a2775d024abcf" +dependencies = [ + "windows-sys 0.59.0", +] + [[package]] name = "http" version = "0.2.12" @@ -661,6 +2160,15 @@ dependencies = [ "itoa", ] +[[package]] +name = "http-auth" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "150fa4a9462ef926824cf4519c84ed652ca8f4fbae34cb8af045b5cbcaf98822" +dependencies = [ + "memchr", +] + [[package]] name = "http-body" version = "0.4.6" @@ -707,6 +2215,12 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "humantime" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" + [[package]] name = "hyper" version = "0.14.32" @@ -935,6 +2449,36 @@ dependencies = [ "icu_properties", ] +[[package]] +name = "ignore" +version = "0.4.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6d89fd380afde86567dfba715db065673989d6253f42b88179abd3eae47bda4b" +dependencies = [ + "crossbeam-deque", + "globset", + "log", + "memchr", + "regex-automata 0.4.9", + "same-file", + "walkdir", + "winapi-util", +] + +[[package]] +name = "im-rc" +version = "15.1.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "af1955a75fa080c677d3972822ec4bad316169ab1cfc6c257a942c2265dbe5fe" +dependencies = [ + "bitmaps", + "rand_core", + "rand_xoshiro", + "sized-chunks", + "typenum", + "version_check", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -955,15 +2499,6 @@ dependencies = [ "hashbrown 0.15.2", ] -[[package]] -name = "instant" -version = "0.1.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" -dependencies = [ - "cfg-if", -] - [[package]] name = "ipnet" version = "2.10.1" @@ -976,6 +2511,12 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7655c9839580ee829dfacba1d1278c2b7883e50a277ff7541299489d6bdfdc45" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.12.1" @@ -1000,27 +2541,57 @@ version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d75a2a4b1b190afb6f5425f10f6a8f959d2ea0b9c2b1d79553551850539e4674" +[[package]] +name = "jiff" +version = "0.1.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db69f08d4fb10524cacdb074c10b296299d71274ddbc830a8ee65666867002e9" +dependencies = [ + "jiff-tzdb-platform", + "windows-sys 0.59.0", +] + +[[package]] +name = "jiff-tzdb" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91335e575850c5c4c673b9bd467b0e025f164ca59d0564f69d0c2ee0ffad4653" + +[[package]] +name = "jiff-tzdb-platform" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9835f0060a626fe59f160437bc725491a6af23133ea906500027d1bd2f8f4329" +dependencies = [ + "jiff-tzdb", +] + +[[package]] +name = "jobserver" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "6717b6b5b077764fb5966237269cb3c64edddde4b14ce42647430a78ced9e7b7" dependencies = [ "once_cell", "wasm-bindgen", ] [[package]] -name = "kv" -version = "0.24.0" +name = "kstring" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "620727085ac39ee9650b373fe6d8073a0aee6f99e52a9c72b25f7671078039ab" +checksum = "558bf9508a558512042d3095138b1f7b8fe90c5467d94f9f1da28b3731c5dbd1" dependencies = [ - "pin-project-lite", - "serde", - "sled", - "thiserror", - "toml", + "static_assertions", ] [[package]] @@ -1029,12 +2600,110 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" 
+[[package]] +name = "libdbus-sys" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06085512b750d640299b79be4bad3d2fa90a9c00b1fd9e1b46364f66f0485c72" +dependencies = [ + "cc", + "pkg-config", +] + +[[package]] +name = "libgit2-sys" +version = "0.16.2+1.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" +dependencies = [ + "cc", + "libc", + "libssh2-sys", + "libz-sys", + "openssl-sys", + "pkg-config", +] + +[[package]] +name = "libloading" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" +dependencies = [ + "cfg-if", + "windows-targets 0.52.6", +] + +[[package]] +name = "libnghttp2-sys" +version = "0.1.10+1.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "959c25552127d2e1fa72f0e52548ec04fc386e827ba71a7bd01db46a447dc135" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", + "redox_syscall", +] + +[[package]] +name = "libsqlite3-sys" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c10584274047cb335c23d3e61bcef8e323adae7c5c8c760540f73610177fc3f" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libssh2-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "link-cplusplus" version = "1.0.9" @@ -1046,9 +2715,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.15" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "litemap" @@ -1068,9 +2737,18 @@ dependencies = [ [[package]] name = "log" -version = "0.4.25" +version = "0.4.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" + +[[package]] +name = "matchers" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04cbf5b083de1c7e0222a7a51dbfdba1cbe1c6ab0b15e29fff3f6c077fd9cd9f" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata 0.1.10", +] [[package]] name = "matchit" @@ -1078,23 +2756,37 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +[[package]] +name = "maybe-async" +version = "0.2.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5cf92c10c7e361d6b99666ec1c6f9805b0bea2c3bd8c78dc6fe98ac5bd78db11" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "memchr" version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +[[package]] +name = "memmap2" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd3f7eed9d3848f8b98834af67102b720745c4ec028fcd0aa0239277e7de374f" +dependencies = [ + "libc", +] + [[package]] name = "meta" version = "0.1.0" dependencies = [ - "defer", - "kv", - "observability", - "opentelemetry", - "prost-types", "proto", - "sled", "tokio", "tonic 0.12.3", ] @@ -1115,8 +2807,8 @@ dependencies = [ "supports-unicode", "terminal_size", "textwrap", - "thiserror", - "unicode-width", + "thiserror 1.0.69", + "unicode-width 0.1.14", ] [[package]] @@ -1136,11 +2828,17 @@ version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" -version = "0.8.3" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8402cab7aefae129c6977bb0ff1b8fd9a04eb5b51efc50a70bea51cda0c7924" +checksum = "4ffbe83022cedc1d264172192511ae958937694cd57ce297164951b8b3568394" dependencies = [ "adler2", ] @@ -1156,6 +2854,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "miow" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "359f76430b20a79f9e20e115b3428614e654f04fab314482fc0fda0ebd3c6044" +dependencies = [ + "windows-sys 0.48.0", +] + [[package]] name = "ngt" version = "0.1.0" @@ -1167,6 +2874,41 @@ dependencies = [ "rand", ] +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "normpath" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c8911957c4b1549ac0dc74e30db9c8b0e66ddcd6d7acc33098f4c63a64a6d7ed" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "nu-ansi-term" +version = "0.46.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +dependencies = [ + "overload", + "winapi", +] + +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-traits" version = "0.2.19" @@ -1176,11 +2918,20 @@ dependencies = [ "autocfg", ] +[[package]] +name = "num_threads" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c7398b9c8b70908f6371f47ed36737907c87c52af34c268fed0bf0ceb92ead9" +dependencies = [ + "libc", +] + [[package]] name = "object" -version = "0.36.7" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = 
"aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] @@ -1207,6 +2958,36 @@ version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" +[[package]] +name = "opener" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0812e5e4df08da354c851a3376fead46db31c2214f849d3de356d774d057681" +dependencies = [ + "bstr", + "dbus", + "normpath", + "windows-sys 0.59.0", +] + +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + +[[package]] +name = "openssl-sys" +version = "0.9.104" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45abf306cbf99debc8195b66b7346498d7b10c210de50418b5ccd7ceba08c741" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + [[package]] name = "opentelemetry" version = "0.23.0" @@ -1218,7 +2999,7 @@ dependencies = [ "js-sys", "once_cell", "pin-project-lite", - "thiserror", + "thiserror 1.0.69", ] [[package]] @@ -1249,7 +3030,7 @@ dependencies = [ "opentelemetry_sdk", "prost 0.12.6", "reqwest", - "thiserror", + "thiserror 1.0.69", "tokio", "tonic 0.11.0", ] @@ -1286,24 +3067,60 @@ dependencies = [ "lazy_static", "once_cell", "opentelemetry", - "ordered-float", + "ordered-float 4.5.0", "percent-encoding", "rand", "serde_json", - "thiserror", + "thiserror 1.0.69", "tokio", "tokio-stream", ] [[package]] name = "ordered-float" -version = "4.6.0" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "ordered-float" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bb71e1b3fa6ca1c61f383464aaf2bb0e2f8e772a1f01d486832464de363b951" +checksum = "c65ee1f9701bf938026630b455d5315f490640234259037edb259798b3bcf85e" dependencies = [ "num-traits", ] +[[package]] +name = "orion" +version = "0.17.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97ab5415cf60cd271259e576f2ddee7a5f9fed42659035224c01af766943fad3" +dependencies = [ + "fiat-crypto", + "subtle", + "zeroize", +] + +[[package]] +name = "os_info" +version = "3.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5ca711d8b83edbb00b44d504503cd247c9c0bd8b0fa2694f2a1a3d8165379ce" +dependencies = [ + "log", + "windows-sys 0.52.0", +] + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + [[package]] name = "owo-colors" version = "4.1.0" @@ -1311,14 +3128,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fb37767f6569cd834a413442455e0f066d0d522de8630436e2a1761d9726ba56" [[package]] -name = "parking_lot" -version = "0.11.2" +name = "p384" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", ] [[package]] @@ 
-1328,34 +3146,41 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", - "parking_lot_core 0.9.10", + "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.6" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", - "instant", "libc", - "redox_syscall 0.2.16", + "redox_syscall", "smallvec", - "winapi", + "windows-targets 0.52.6", ] [[package]] -name = "parking_lot_core" -version = "0.9.10" +name = "pasetors" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "6b36d47c66f2230dd1b7143d9afb2b4891879020210eddf2ccb624e529b96dba" dependencies = [ - "cfg-if", - "libc", - "redox_syscall 0.5.8", - "smallvec", - "windows-targets 0.52.6", + "ct-codecs", + "ed25519-compact", + "getrandom", + "orion", + "p384", + "rand_core", + "regex", + "serde", + "serde_json", + "sha2", + "subtle", + "time", + "zeroize", ] [[package]] @@ -1364,6 +3189,21 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pathdiff" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df94ce210e5bc13cb6651479fa48d14f601d9858cfe0467f43ae157023b938d3" + +[[package]] +name = "pem-rfc7468" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "88b39c9bfcfc231068454382784bb460aae594343fb030d46e9f50a645418412" +dependencies = [ + "base64ct", +] + [[package]] name = "percent-encoding" version = "2.3.1" @@ -1372,18 +3212,18 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pin-project" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e2ec53ad785f4d35dac0adea7f7dc6f1bb277ad84a680c7afefeae05d1f5916" +checksum = "be57f64e946e500c8ee36ef6331845d40a93055567ec57e8fae13efd33759b95" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.8" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56a66c0c55993aa927429d0f8a0abfd74f084e4d9c192cffed01e418d83eefb" +checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", @@ -1392,9 +3232,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.16" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" +checksum = "915a1e146535de9163f3987b8944ed8cf49a18bb0056bcebcdcece385cece4ff" [[package]] name = "pin-utils" @@ -1402,6 +3242,28 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" +[[package]] +name = "pkcs8" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" +dependencies = [ + "der", + "spki", +] + 
+[[package]] +name = "pkg-config" +version = "0.3.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" + +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.20" @@ -1411,15 +3273,33 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "proc-macro2" -version = "1.0.93" +version = "1.0.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60946a68e5f9d28b0dc1c21bb8a97ee7d018a8b322fa57838ba31cc878e22d99" +checksum = "37d3544b3f2748c54e147655edb5025752e2303145b5aefb3c3ea2c78b973bb0" dependencies = [ "unicode-ident", ] +[[package]] +name = "prodash" +version = "28.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "744a264d26b88a6a7e37cbad97953fa233b94d585236310bcbc88474b4092d79" +dependencies = [ + "parking_lot", +] + [[package]] name = "prost" version = "0.12.6" @@ -1486,11 +3366,21 @@ dependencies = [ "tonic-types", ] +[[package]] +name = "qbg" +version = "0.1.0" +dependencies = [ + "anyhow", + "cxx", + "cxx-build", + "miette", +] + [[package]] name = "quote" -version = "1.0.38" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -1526,12 +3416,12 @@ dependencies = [ ] [[package]] -name = "redox_syscall" -version = "0.2.16" +name = "rand_xoshiro" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" dependencies = [ - "bitflags 1.3.2", + "rand_core", ] [[package]] @@ -1540,9 +3430,53 @@ version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "03a862b389f93e68874fbf580b9de08dd02facb9a788ebadaf4a3fd33cf58834" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", +] + +[[package]] +name = "regex" +version = "1.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata 0.4.9", + "regex-syntax 0.8.5", +] + +[[package]] +name = "regex-automata" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" +dependencies = [ + "regex-syntax 0.6.29", +] + +[[package]] +name = "regex-automata" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax 0.8.5", ] +[[package]] +name = "regex-syntax" +version = "0.6.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" + +[[package]] +name = "regex-syntax" +version = "0.8.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" + [[package]] name = "reqwest" version = "0.11.27" @@ -1579,19 +3513,55 @@ dependencies = [ "winreg", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + +[[package]] +name = "rusqlite" +version = "0.31.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b838eba278d213a8beaf485bd313fd580ca4505a00d5871caeb1457c55322cae" +dependencies = [ + "bitflags 2.6.0", + "fallible-iterator", + "fallible-streaming-iterator", + "hashlink", + "libsqlite3-sys", + "smallvec", +] + [[package]] name = "rustc-demangle" version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustfix" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82fa69b198d894d84e23afde8e9ab2af4400b2cba20d6bf2b428a8b01c222c5a" +dependencies = [ + "serde", + "serde_json", + "thiserror 1.0.69", + "tracing", +] + [[package]] name = "rustix" -version = "0.38.43" +version = "0.38.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a78891ee6bf2340288408954ac787aa063d8e8817e9f53abb37c695c6d834ef6" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" dependencies = [ - "bitflags 2.8.0", + "bitflags 2.6.0", "errno", "libc", "linux-raw-sys", @@ -1600,9 +3570,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.19" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c45b9784283f1b2e7fb61b42047c2fd678ef0960d4f6f1eba131594cc369d4" +checksum = "0e819f2bc632f285be6d7cd36e25940d45b2391dd6d9b939e79de557f7014248" [[package]] name = "ryu" @@ -1611,42 +3581,136 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] -name = "scopeguard" -version = "1.2.0" +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "schannel" +version = "0.1.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +dependencies = [ + "windows-sys 0.59.0", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "scratch" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" + +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] 
+ +[[package]] +name = "security-framework" +version = "2.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +dependencies = [ + "bitflags 2.6.0", + "core-foundation 0.9.4", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1863fd3768cd83c56a7f60faa4dc0d403f1b6df0a38c3c25f44b7894e45370d5" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "semver" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cb6eb87a131f756572d7fb904f6e7b68633f09cca868c5df1c4b8d1a694bbba" +dependencies = [ + "serde", +] + +[[package]] +name = "serde" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +checksum = "0b9781016e935a97e8beecf0c933758c97a5520d32930e460142b4cd80c6338e" +dependencies = [ + "serde_derive", +] [[package]] -name = "scratch" -version = "1.0.7" +name = "serde-untagged" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3cf7c11c38cb994f3d40e8a8cde3bbd1f72a435e4c49e85d6553d8312306152" +checksum = "2676ba99bd82f75cae5cbd2c8eda6fa0b8760f18978ea840e980dd5567b5c5b6" +dependencies = [ + "erased-serde", + "serde", + "typeid", +] [[package]] -name = "serde" -version = "1.0.217" +name = "serde-value" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" +checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c" dependencies = [ - "serde_derive", + "ordered-float 2.10.1", + "serde", ] [[package]] name = "serde_derive" -version = "1.0.217" +version = "1.0.216" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" +checksum = "46f859dbbf73865c6627ed570e78961cd3ac92407a2d117204c49232485da55e" dependencies = [ "proc-macro2", "quote", "syn", ] +[[package]] +name = "serde_ignored" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8e319a36d1b52126a0d608f24e93b2d81297091818cd70625fcf50a15d84ddf" +dependencies = [ + "serde", +] + [[package]] name = "serde_json" -version = "1.0.135" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b0d7ba2887406110130a978386c4e1befb98c674b4fba677954e4db976630d9" +checksum = "c7fceb2473b9166b2294ef05efcb65a3db80803f0b03ef86a5fc88a2b85ee377" dependencies = [ "itoa", "memchr", @@ -1654,6 +3718,15 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -1666,6 +3739,55 @@ dependencies = [ "serde", ] +[[package]] +name = "sha1" +version = "0.10.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sha1_smol" +version = "1.0.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" + +[[package]] +name = "sha2" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "shell-escape" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f" + +[[package]] +name = "shell-words" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" + [[package]] name = "shlex" version = "1.3.0" @@ -1682,28 +3804,32 @@ dependencies = [ ] [[package]] -name = "slab" -version = "0.4.9" +name = "signature" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ - "autocfg", + "digest", + "rand_core", ] [[package]] -name = "sled" -version = "0.34.7" +name = "sized-chunks" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" +checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot 0.11.2", + "bitmaps", + "typenum", +] + +[[package]] +name = "slab" +version = "0.4.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" +dependencies = [ + "autocfg", ] [[package]] @@ -1722,18 +3848,40 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "spki" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" +dependencies = [ + "base64ct", + "der", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" +[[package]] +name = "static_assertions" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + [[package]] name = "strsim" version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" +[[package]] +name = "subtle" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + [[package]] name = "supports-color" version = "3.0.2" @@ -1757,9 +3905,9 @@ checksum = "b7401a30af6cb5818bb64852270bb722533397edcfc7344954a38f420819ece2" [[package]] name = "syn" -version = "2.0.96" +version = "2.0.90" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5d0adab1ae378d7f53bdebc67a39f1f151407ef230f0ce2883572f5d8985c80" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -1796,7 +3944,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", - "core-foundation", + "core-foundation 0.9.4", "system-configuration-sys", ] @@ -1810,6 +3958,29 @@ dependencies = [ "libc", ] +[[package]] +name = "tar" +version = "0.4.43" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c65998313f8e17d0d553d28f91a0df93e4dbbbf770279c7bc21ca0f09ea1a1f6" +dependencies = [ + "filetime", + "libc", +] + +[[package]] +name = "tempfile" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys 0.59.0", +] + [[package]] name = "termcolor" version = "1.4.1" @@ -1836,7 +4007,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" dependencies = [ "unicode-linebreak", - "unicode-width", + "unicode-width 0.1.14", ] [[package]] @@ -1845,7 +4016,16 @@ version = "1.0.69" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" dependencies = [ - "thiserror-impl", + "thiserror-impl 1.0.69", +] + +[[package]] +name = "thiserror" +version = "2.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08f5383f3e0071702bf93ab5ee99b52d26936be9dedd9413067cbdcddcb6141a" +dependencies = [ + "thiserror-impl 2.0.8", ] [[package]] @@ -1859,6 +4039,60 @@ dependencies = [ "syn", ] +[[package]] +name = "thiserror-impl" +version = "2.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f357fcec90b3caef6623a099691be676d033b40a058ac95d2a6ade6fa0c943" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "time" +version = "0.3.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" +dependencies = [ + "deranged", + "itoa", + "libc", + "num-conv", + "num_threads", + "powerfmt", + "serde", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" + +[[package]] +name = "time-macros" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinystr" version = "0.7.6" @@ -1869,17 +4103,32 @@ dependencies = [ "zerovec", ] +[[package]] +name = "tinyvec" +version = "1.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +dependencies = [ + "tinyvec_macros", +] + +[[package]] +name = "tinyvec_macros" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + [[package]] name = "tokio" -version = "1.43.0" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d61fa4ffa3de412bfea335c6ecff681de2b609ba3c77ef3e00e521813a9ed9e" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", "libc", "mio", - "parking_lot 0.12.3", + "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", @@ -1899,9 +4148,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", @@ -1935,11 +4184,36 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.11" +version = "0.8.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f7f0dd8d50a853a531c426359045b1998f04219d88799810762cd4ad314234" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ + "indexmap 2.7.0", "serde", + "serde_spanned", + "toml_datetime", + "winnow", ] [[package]] @@ -2078,6 +4352,17 @@ dependencies = [ "syn", ] +[[package]] +name = "tracing-chrome" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf0a738ed5d6450a9fb96e86a23ad808de2b727fd1394585da5cdd6788ffe724" +dependencies = [ + "serde_json", + "tracing-core", + "tracing-subscriber", +] + [[package]] name = "tracing-core" version = "0.1.33" @@ -2085,6 +4370,36 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", + "valuable", +] + +[[package]] +name = "tracing-log" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" +dependencies = [ + "log", + "once_cell", + "tracing-core", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +dependencies = [ + "matchers", + "nu-ansi-term", + "once_cell", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", ] [[package]] @@ -2093,6 +4408,30 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +[[package]] +name = 
"typeid" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e13db2e0ccd5e14a544e8a246ba2312cd25223f616442d7f2cb0e3db614236e" + +[[package]] +name = "typenum" +version = "1.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" + +[[package]] +name = "unicase" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e51b68083f157f853b6379db119d1c1be0e6e4dec98101079dec41f6f5cf6df" + +[[package]] +name = "unicode-bom" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7eec5d1121208364f6793f7d2e222bf75a915c19557537745b195b253dd64217" + [[package]] name = "unicode-ident" version = "1.0.14" @@ -2105,12 +4444,33 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" +[[package]] +name = "unicode-normalization" +version = "0.1.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" +dependencies = [ + "tinyvec", +] + [[package]] name = "unicode-width" version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" +[[package]] +name = "unicode-width" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fc81956842c57dac11422a97c3b8195a1ff727f06e85c84ed2e8aa277c9a0fd" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + [[package]] name = "url" version = "2.5.4" @@ -2134,6 +4494,40 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" +[[package]] +name = "utf8parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "vcpkg" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + [[package]] name = "want" version = "0.3.1" @@ -2151,21 +4545,20 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = 
"a474f6281d1d70c17ae7aa6a613c87fce69a127e2624002df63dcb39d6cf6396" dependencies = [ "cfg-if", "once_cell", - "rustversion", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "5f89bb38646b4f81674e8f5c3fb81b562be1fd936d84320f3264486418519c79" dependencies = [ "bumpalo", "log", @@ -2177,9 +4570,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "38176d9b44ea84e9184eff0bc34cc167ed044f816accfe5922e54d84cf48eca2" dependencies = [ "cfg-if", "js-sys", @@ -2190,9 +4583,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "2cc6181fd9a7492eef6fef1f33961e3695e4579b9872a6f7c83aee556666d4fe" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -2200,9 +4593,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "30d7a95b763d3c45903ed6c81f156801839e5ee968bb07e534c44df0fcd330c2" dependencies = [ "proc-macro2", "quote", @@ -2213,18 +4606,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" -dependencies = [ - "unicode-ident", -] +checksum = "943aab3fdaaa029a6e0271b35ea10b72b943135afe9bffca82384098ad0e06a6" [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "04dd7223427d52553d3702c004d3b2fe07c148165faa56313cb00211e31c12bc" dependencies = [ "js-sys", "wasm-bindgen", @@ -2409,6 +4799,15 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +[[package]] +name = "winnow" +version = "0.6.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" +dependencies = [ + "memchr", +] + [[package]] name = "winreg" version = "0.50.0" @@ -2497,6 +4896,12 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zeroize" +version = "1.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" + [[package]] name = "zerovec" version = "0.10.4" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 8253ebd1ed..18fa300ecc 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -15,11 +15,12 @@ # [workspace] members = [ - "bin/agent", + "libs/observability", + "libs/proto", "bin/meta", + "bin/agent", "libs/algorithm", "libs/algorithms/ngt", + "libs/algorithms/qbg", "libs/algorithms/faiss", - "libs/observability", - "libs/proto", ] diff --git 
a/rust/bin/agent/Cargo.toml b/rust/bin/agent/Cargo.toml index b369671595..0fe99925d9 100644 --- a/rust/bin/agent/Cargo.toml +++ b/rust/bin/agent/Cargo.toml @@ -22,8 +22,12 @@ edition = "2021" [dependencies] algorithm = { version = "0.1.0", path = "../../libs/algorithm" } -prost = "0.13.1" +anyhow = "1.0.88" +cargo = "0.81.0" +prost = "0.13.2" +prost-types = "0.13.2" proto = { version = "0.1.0", path = "../../libs/proto" } -tokio = { version = "1.39.3", features = ["full"] } -tokio-stream = { version = "0.1.15", features = ["full"] } -tonic = "0.12.1" +tokio = { version = "1.40.0", features = ["full"] } +tokio-stream = { version = "0.1.16", features = ["full"] } +tonic = "0.12.2" +tonic-types = "0.12.2" diff --git a/rust/bin/agent/src/handler.rs b/rust/bin/agent/src/handler.rs index 5e7655e613..3a2421cee4 100644 --- a/rust/bin/agent/src/handler.rs +++ b/rust/bin/agent/src/handler.rs @@ -14,14 +14,29 @@ // limitations under the License. // mod common; -mod index; -mod insert; -mod remove; -mod search; -mod update; -mod upsert; +pub mod index; +pub mod insert; +pub mod remove; +pub mod search; +pub mod update; +pub mod upsert; -#[derive(Default, Debug)] pub struct Agent { - + s: Box, + name: String, + ip: String, + resource_type: String, + api_name: String, +} + +impl Agent { + pub fn new(s: impl algorithm::ANN + 'static, name: &str, ip: &str, resource_type: &str, api_name: &str) -> Self { + Self { + s: Box::new(s), + name: name.to_string(), + ip: ip.to_string(), + resource_type: resource_type.to_string(), + api_name: api_name.to_string(), + } + } } diff --git a/rust/bin/agent/src/handler/index.rs b/rust/bin/agent/src/handler/index.rs index a4a11d8286..eada900410 100644 --- a/rust/bin/agent/src/handler/index.rs +++ b/rust/bin/agent/src/handler/index.rs @@ -15,7 +15,7 @@ // use proto::{ core::v1::agent_server, - payload::v1::{control, info, object, Empty}, + payload::v1::{control, info, Empty}, vald::v1::index_server, }; @@ -23,14 +23,14 @@ use proto::{ impl agent_server::Agent for super::Agent { async fn create_index( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } async fn save_index( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -38,7 +38,7 @@ impl agent_server::Agent for super::Agent { #[doc = " Represent the creating and saving index RPC.\n"] async fn create_and_save_index( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -49,7 +49,7 @@ impl index_server::Index for super::Agent { #[doc = " Represent the RPC to get the agent index information.\n"] async fn index_info( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -57,14 +57,14 @@ impl index_server::Index for super::Agent { #[doc = " Represent the RPC to get the agent index detailed information.\n"] async fn index_detail( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } #[doc = " Represent the RPC to get the agent index statistics.\n"] async fn index_statistics( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -72,7 +72,7 @@ impl index_server::Index for super::Agent { #[doc = " Represent the RPC to get the agent index detailed statistics.\n"] async fn index_statistics_detail( &self, - request: tonic::Request, + _request: 
tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -80,7 +80,7 @@ impl index_server::Index for super::Agent { #[doc = " Represent the RPC to get the index property.\n"] async fn index_property( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } diff --git a/rust/bin/agent/src/handler/insert.rs b/rust/bin/agent/src/handler/insert.rs index 9916415533..03582afac7 100644 --- a/rust/bin/agent/src/handler/insert.rs +++ b/rust/bin/agent/src/handler/insert.rs @@ -21,7 +21,7 @@ use proto::{ impl insert_server::Insert for super::Agent { async fn insert( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -32,7 +32,7 @@ impl insert_server::Insert for super::Agent { #[doc = " A method to add new multiple vectors by bidirectional streaming.\n"] async fn stream_insert( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -40,7 +40,7 @@ impl insert_server::Insert for super::Agent { #[doc = " A method to add new multiple vectors in a single request.\n"] async fn multi_insert( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } diff --git a/rust/bin/agent/src/handler/remove.rs b/rust/bin/agent/src/handler/remove.rs index 2e7e9fb061..8ae8127e4b 100644 --- a/rust/bin/agent/src/handler/remove.rs +++ b/rust/bin/agent/src/handler/remove.rs @@ -22,7 +22,7 @@ use proto::{ impl remove_server::Remove for super::Agent { async fn remove( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -30,7 +30,7 @@ impl remove_server::Remove for super::Agent { #[doc = " A method to remove an indexed vector based on timestamp.\n"] async fn remove_by_timestamp( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -41,7 +41,7 @@ impl remove_server::Remove for super::Agent { #[doc = " A method to remove multiple indexed vectors by bidirectional streaming.\n"] async fn stream_remove( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -49,7 +49,7 @@ impl remove_server::Remove for super::Agent { #[doc = " A method to remove multiple indexed vectors in a single request.\n"] async fn multi_remove( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } diff --git a/rust/bin/agent/src/handler/search.rs b/rust/bin/agent/src/handler/search.rs index 54788fa269..709bfe8df9 100644 --- a/rust/bin/agent/src/handler/search.rs +++ b/rust/bin/agent/src/handler/search.rs @@ -13,21 +13,95 @@ // See the License for the specific language governing permissions and // limitations under the License. 
// +use std::{collections::HashMap, string::String}; + +use algorithm::Error; +use anyhow::Result; use proto::{payload::v1::search, vald::v1::search_server}; +use prost::Message; +use tonic::{Code, Status}; +use tonic_types::{ErrorDetails, FieldViolation, StatusExt}; #[tonic::async_trait] impl search_server::Search for super::Agent { async fn search( &self, request: tonic::Request<search::Request>, - ) -> Result<tonic::Response<search::Response>, tonic::Status> { - todo!() + ) -> Result<tonic::Response<search::Response>, Status> { + println!("Received a request from {:?}", request.remote_addr()); + let req = request.get_ref(); + let config = req.config.clone().unwrap(); + let hostname = cargo::util::hostname()?; + let domain = hostname.to_str().unwrap(); + if req.vector.len() != self.s.get_dimension_size() { + let err = Error::IncompatibleDimensionSize{ got: req.vector.len(), want: self.s.get_dimension_size()}; + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, HashMap::new()); + err_details.set_request_info(config.request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_bad_request(vec![FieldViolation::new("vector dimension size", err.to_string())]); + err_details.set_resource_info(self.resource_type.clone() + "/ngt.Search", "", "", ""); + let status = Status::with_error_details(Code::InvalidArgument, "Search API Incompatible Dimension Size detected", err_details); + return Err(status); + } + + let result = self.s.search(req.vector.clone(), config.num, config.epsilon, config.radius); + match result { + Err(err) => { + let metadata = HashMap::new(); + let resource_type = self.resource_type.clone() + "/ngt.Search"; + let resource_name = format!("{}: {}({})", self.api_name, self.name, self.ip); + let status = match err { + Error::CreateIndexingIsInProgress{} => { + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, metadata); + err_details.set_request_info(config.request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_resource_info(resource_type, resource_name, "", ""); + Status::with_error_details(Code::Aborted, "Search API aborted to process search request due to creating indices is in progress", err_details) + } + Error::FlushingIsInProgress{} => { + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, metadata); + err_details.set_request_info(config.request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_resource_info(resource_type, resource_name, "", ""); + Status::with_error_details(Code::Aborted, "Search API aborted to process search request due to flushing indices is in progress", err_details) + } + Error::EmptySearchResult{} => { + let request_id = config.request_id; + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, metadata); + err_details.set_request_info(&request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_resource_info(resource_type, resource_name, "", ""); + Status::with_error_details(Code::NotFound, format!("Search API requestID {}'s search result not found", &request_id), err_details) + } + Error::IncompatibleDimensionSize{ got: _, want: _ } => { + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, metadata); + err_details.set_request_info(config.request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_resource_info(resource_type, resource_name, "", ""); +
err_details.set_bad_request(vec![FieldViolation::new("vector dimension size", err.to_string())]); + Status::with_error_details(Code::InvalidArgument, "Search API Incompatible Dimension Size detected", err_details) + } + _ => { + let mut err_details = ErrorDetails::new(); + err_details.set_error_info(err.to_string(), domain, metadata); + err_details.set_request_info(config.request_id, String::from_utf8(req.encode_to_vec()).unwrap()); + err_details.set_resource_info(resource_type, resource_name, "", ""); + Status::with_error_details(Code::Internal, "Search API failed to process search request", err_details) + } + }; + Err(status) + } + Ok(mut response) => { + response.get_mut().request_id = config.request_id; + Ok(response) + } + } } #[doc = " A method to search indexed vectors by ID.\n"] async fn search_by_id( &self, - request: tonic::Request, + _request: tonic::Request, ) -> Result, tonic::Status> { todo!() } @@ -38,7 +112,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to search indexed vectors by multiple vectors.\n"] async fn stream_search( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -49,7 +123,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to search indexed vectors by multiple IDs.\n"] async fn stream_search_by_id( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -57,7 +131,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to search indexed vectors by multiple vectors in a single request.\n"] async fn multi_search( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -65,7 +139,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to search indexed vectors by multiple IDs in a single request.\n"] async fn multi_search_by_id( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -73,7 +147,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to linear search indexed vectors by a raw vector.\n"] async fn linear_search( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -81,7 +155,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to linear search indexed vectors by ID.\n"] async fn linear_search_by_id( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -92,7 +166,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to linear search indexed vectors by multiple vectors.\n"] async fn stream_linear_search( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -103,7 +177,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to linear search indexed vectors by multiple IDs.\n"] async fn stream_linear_search_by_id( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() @@ -112,7 +186,7 @@ impl search_server::Search for super::Agent { #[doc = " A method to linear search indexed vectors by multiple vectors in a single\n request.\n"] async fn multi_linear_search( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -120,7 +194,7 @@ impl search_server::Search 
for super::Agent { #[doc = " A method to linear search indexed vectors by multiple IDs in a single\n request.\n"] async fn multi_linear_search_by_id( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } diff --git a/rust/bin/agent/src/handler/update.rs b/rust/bin/agent/src/handler/update.rs index f08f7469f4..9f4465de91 100644 --- a/rust/bin/agent/src/handler/update.rs +++ b/rust/bin/agent/src/handler/update.rs @@ -22,7 +22,7 @@ use proto::{ impl update_server::Update for super::Agent { async fn update( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -33,7 +33,7 @@ impl update_server::Update for super::Agent { #[doc = " A method to update multiple indexed vectors by bidirectional streaming.\n"] async fn stream_update( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -41,8 +41,16 @@ impl update_server::Update for super::Agent { #[doc = " A method to update multiple indexed vectors in a single request.\n"] async fn multi_update( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } + + #[doc = " A method to update timestamp indexed vectors in a single request.\n"] + async fn update_timestamp( + &self, + _request: tonic::Request, + ) -> std::result::Result, tonic::Status> { + todo!() + } } diff --git a/rust/bin/agent/src/handler/upsert.rs b/rust/bin/agent/src/handler/upsert.rs index 933e7785ac..a162b84dc2 100644 --- a/rust/bin/agent/src/handler/upsert.rs +++ b/rust/bin/agent/src/handler/upsert.rs @@ -22,7 +22,7 @@ use proto::{ impl upsert_server::Upsert for super::Agent { async fn upsert( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } @@ -33,7 +33,7 @@ impl upsert_server::Upsert for super::Agent { #[doc = " A method to insert/update multiple vectors by bidirectional streaming.\n"] async fn stream_upsert( &self, - request: tonic::Request>, + _request: tonic::Request>, ) -> std::result::Result, tonic::Status> { todo!() } @@ -41,7 +41,7 @@ impl upsert_server::Upsert for super::Agent { #[doc = " A method to insert/update multiple vectors in a single request.\n"] async fn multi_upsert( &self, - request: tonic::Request, + _request: tonic::Request, ) -> std::result::Result, tonic::Status> { todo!() } diff --git a/rust/bin/agent/src/main.rs b/rust/bin/agent/src/main.rs index cda11757cd..d1e8d41264 100644 --- a/rust/bin/agent/src/main.rs +++ b/rust/bin/agent/src/main.rs @@ -14,12 +14,32 @@ // limitations under the License. 
// +use algorithm::Error; +use anyhow::Result; +use proto::payload::v1::search; + mod handler; +#[derive(Debug)] +struct MockService { + dim: usize +} + +impl algorithm::ANN for MockService { + fn get_dimension_size(&self) -> usize { + self.dim + } + + fn search(&self, _vector: Vec, dim: u32, _epsilon: f32, _radius: f32) -> Result, Error> { + Err(Error::IncompatibleDimensionSize{got: dim as usize, want: self.dim}.into()) + } +} + #[tokio::main] async fn main() -> Result<(), Box> { - let addr = "[::1]:8081".parse()?; - let agent = handler::Agent::default(); + let addr = "0.0.0.0:8081".parse()?; + let service = MockService{ dim: 42 }; + let agent = handler::Agent::new(service, "agent-ngt", "127.0.0.1", "vald/internal/core/algorithm", "vald-agent"); tonic::transport::Server::builder() .add_service(proto::core::v1::agent_server::AgentServer::new(agent)) diff --git a/rust/libs/algorithm/Cargo.toml b/rust/libs/algorithm/Cargo.toml index 199a0c80ba..a91e2d8418 100644 --- a/rust/libs/algorithm/Cargo.toml +++ b/rust/libs/algorithm/Cargo.toml @@ -19,5 +19,9 @@ version = "0.1.0" edition = "2021" [dependencies] +anyhow = "1.0.88" faiss = { version = "0.1.0", path = "../algorithms/faiss" } ngt = { version = "0.1.0", path = "../algorithms/ngt" } +qbg = { version = "0.1.0", path = "../algorithms/qbg" } +proto = { version = "0.1.0", path = "../proto" } +tonic = "0.12.2" diff --git a/rust/libs/algorithm/src/lib.rs b/rust/libs/algorithm/src/lib.rs index 1f3bce192b..c4141dbe64 100644 --- a/rust/libs/algorithm/src/lib.rs +++ b/rust/libs/algorithm/src/lib.rs @@ -13,17 +13,36 @@ // See the License for the specific language governing permissions and // limitations under the License. // -pub fn add(left: u64, right: u64) -> u64 { - left + right +use std::{error, fmt}; + +use anyhow::Result; +use proto::payload::v1::search; + +#[derive(Debug)] +pub enum Error { + CreateIndexingIsInProgress{}, + FlushingIsInProgress{}, + EmptySearchResult{}, + IncompatibleDimensionSize{got: usize, want: usize}, + + Unknown{}, } -#[cfg(test)] -mod tests { - use super::*; +impl error::Error for Error {} - #[test] - fn it_works() { - let result = add(2, 2); - assert_eq!(result, 4); +impl fmt::Display for Error { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Error::CreateIndexingIsInProgress{} => write!(f, "create indexing is in progress"), + Error::FlushingIsInProgress{} => write!(f, "flush is in progress"), + Error::EmptySearchResult{} => write!(f, "search result is empty"), + Error::IncompatibleDimensionSize { got, want } => write!(f, "incompatible dimension size detected\trequested: {},\tconfigured: {}", got, want), + Error::Unknown { } => write!(f, "unknown error") + } } } + +pub trait ANN: Send + Sync { + fn get_dimension_size(&self) -> usize; + fn search(&self, vector: Vec, dim: u32, epsilon: f32, radius: f32) -> Result, Error>; +} diff --git a/rust/libs/algorithms/ngt/Cargo.toml b/rust/libs/algorithms/ngt/Cargo.toml index 5ce44b6679..d1db093977 100644 --- a/rust/libs/algorithms/ngt/Cargo.toml +++ b/rust/libs/algorithms/ngt/Cargo.toml @@ -19,11 +19,11 @@ version = "0.1.0" edition = "2021" [dependencies] -anyhow = "1.0.86" -cxx = { version = "1.0.126", features = ["c++20"] } +anyhow = "1.0.88" +cxx = { version = "1.0.128", features = ["c++20"] } [build-dependencies] -cxx-build = "1.0.126" +cxx-build = "1.0.128" miette = { version = "7.2.0", features = ["fancy"] } [dev-dependencies] diff --git a/rust/libs/proto/Cargo.toml b/rust/libs/proto/Cargo.toml index 8db9d1284b..5e1ebb4b45 100644 --- 
a/rust/libs/proto/Cargo.toml +++ b/rust/libs/proto/Cargo.toml @@ -22,7 +22,7 @@ edition = "2021" [dependencies] futures-core = "0.3.30" -prost = "0.13.1" +prost = "0.13.2" prost-types = "0.13.2" -tonic = "0.12.1" -tonic-types = "0.12.1" +tonic = "0.12.2" +tonic-types = "0.12.2" diff --git a/rust/libs/proto/src/lib.rs b/rust/libs/proto/src/lib.rs index 24454d5932..a62e4a873d 100644 --- a/rust/libs/proto/src/lib.rs +++ b/rust/libs/proto/src/lib.rs @@ -16,6 +16,7 @@ pub mod google { pub mod rpc { pub type Status = tonic_types::Status; +// include!("rpc.v1.rs"); } } diff --git a/rust/libs/proto/src/payload.v1.rs b/rust/libs/proto/src/payload.v1.rs index bd5b2ee296..f2f30a38d8 100644 --- a/rust/libs/proto/src/payload.v1.rs +++ b/rust/libs/proto/src/payload.v1.rs @@ -30,6 +30,10 @@ pub mod search { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.Request".into() }} /// Represent the multiple search request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -38,6 +42,10 @@ pub mod search { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiRequest { +const NAME: &'static str = "MultiRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.MultiRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.MultiRequest".into() }} /// Represent a search by ID request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -49,6 +57,10 @@ pub mod search { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for IdRequest { +const NAME: &'static str = "IDRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.IDRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.IDRequest".into() }} /// Represent the multiple search by ID request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -57,6 +69,10 @@ pub mod search { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiIdRequest { +const NAME: &'static str = "MultiIDRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.MultiIDRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.MultiIDRequest".into() }} /// Represent a search by binary object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -71,6 +87,10 @@ pub mod search { #[prost(message, optional, tag="3")] pub vectorizer: ::core::option::Option, } +impl ::prost::Name for ObjectRequest { +const NAME: &'static str = "ObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.ObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.ObjectRequest".into() }} /// Represent the multiple search by binary object request. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -79,6 +99,10 @@ pub mod search { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiObjectRequest { +const NAME: &'static str = "MultiObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.MultiObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.MultiObjectRequest".into() }} /// Represent search configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -117,6 +141,10 @@ pub mod search { #[prost(uint32, tag="11")] pub nprobe: u32, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.Config".into() }} /// Represent a search response. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -128,6 +156,10 @@ pub mod search { #[prost(message, repeated, tag="2")] pub results: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Response { +const NAME: &'static str = "Response"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.Response".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.Response".into() }} /// Represent multiple search responses. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -136,6 +168,10 @@ pub mod search { #[prost(message, repeated, tag="1")] pub responses: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Responses { +const NAME: &'static str = "Responses"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.Responses".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.Responses".into() }} /// Represent stream search response. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -156,6 +192,10 @@ pub mod search { Status(super::super::super::super::google::rpc::Status), } } +impl ::prost::Name for StreamResponse { +const NAME: &'static str = "StreamResponse"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search.StreamResponse".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search.StreamResponse".into() }} /// AggregationAlgorithm is enum of each aggregation algorithms #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] @@ -193,6 +233,10 @@ pub mod search { } } } +impl ::prost::Name for Search { +const NAME: &'static str = "Search"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Search".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Search".into() }} /// Filter related messages. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -211,6 +255,10 @@ pub mod filter { #[prost(uint32, tag="2")] pub port: u32, } +impl ::prost::Name for Target { +const NAME: &'static str = "Target"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Filter.Target".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Filter.Target".into() }} /// Represent filter configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -219,7 +267,15 @@ pub mod filter { #[prost(message, repeated, tag="1")] pub targets: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Filter.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Filter.Config".into() }} } +impl ::prost::Name for Filter { +const NAME: &'static str = "Filter"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Filter".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Filter".into() }} /// Insert related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -238,6 +294,10 @@ pub mod insert { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert.Request".into() }} /// Represent the multiple insert request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -246,6 +306,10 @@ pub mod insert { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiRequest { +const NAME: &'static str = "MultiRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert.MultiRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert.MultiRequest".into() }} /// Represent the insert by binary object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -260,6 +324,10 @@ pub mod insert { #[prost(message, optional, tag="3")] pub vectorizer: ::core::option::Option, } +impl ::prost::Name for ObjectRequest { +const NAME: &'static str = "ObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert.ObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert.ObjectRequest".into() }} /// Represent the multiple insert by binary object request. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -268,6 +336,10 @@ pub mod insert { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiObjectRequest { +const NAME: &'static str = "MultiObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert.MultiObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert.MultiObjectRequest".into() }} /// Represent insert configurations. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -282,7 +354,15 @@ pub mod insert { #[prost(int64, tag="3")] pub timestamp: i64, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert.Config".into() }} } +impl ::prost::Name for Insert { +const NAME: &'static str = "Insert"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Insert".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Insert".into() }} /// Update related messages #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -301,6 +381,10 @@ pub mod update { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.Request".into() }} /// Represent the multiple update request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -309,6 +393,10 @@ pub mod update { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiRequest { +const NAME: &'static str = "MultiRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.MultiRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.MultiRequest".into() }} /// Represent the update binary object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -323,6 +411,10 @@ pub mod update { #[prost(message, optional, tag="3")] pub vectorizer: ::core::option::Option, } +impl ::prost::Name for ObjectRequest { +const NAME: &'static str = "ObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.ObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.ObjectRequest".into() }} /// Represent the multiple update binary object request. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -331,6 +423,28 @@ pub mod update { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiObjectRequest { +const NAME: &'static str = "MultiObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.MultiObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.MultiObjectRequest".into() }} + /// Represent a vector meta data. + #[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] + pub struct TimestampRequest { + /// The vector ID. + #[prost(string, tag="1")] + pub id: ::prost::alloc::string::String, + /// timestamp represents when this vector inserted. + #[prost(int64, tag="2")] + pub timestamp: i64, + /// force represents forcefully update the timestamp. + #[prost(bool, tag="3")] + pub force: bool, + } +impl ::prost::Name for TimestampRequest { +const NAME: &'static str = "TimestampRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.TimestampRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.TimestampRequest".into() }} /// Represent the update configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -349,7 +463,15 @@ pub mod update { #[prost(bool, tag="4")] pub disable_balanced_update: bool, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update.Config".into() }} } +impl ::prost::Name for Update { +const NAME: &'static str = "Update"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Update".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Update".into() }} /// Upsert related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -368,6 +490,10 @@ pub mod upsert { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert.Request".into() }} /// Represent mthe ultiple upsert request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -376,6 +502,10 @@ pub mod upsert { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiRequest { +const NAME: &'static str = "MultiRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert.MultiRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert.MultiRequest".into() }} /// Represent the upsert binary object request. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -390,6 +520,10 @@ pub mod upsert { #[prost(message, optional, tag="3")] pub vectorizer: ::core::option::Option, } +impl ::prost::Name for ObjectRequest { +const NAME: &'static str = "ObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert.ObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert.ObjectRequest".into() }} /// Represent the multiple upsert binary object request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -398,6 +532,10 @@ pub mod upsert { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiObjectRequest { +const NAME: &'static str = "MultiObjectRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert.MultiObjectRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert.MultiObjectRequest".into() }} /// Represent the upsert configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -416,7 +554,15 @@ pub mod upsert { #[prost(bool, tag="4")] pub disable_balanced_update: bool, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert.Config".into() }} } +impl ::prost::Name for Upsert { +const NAME: &'static str = "Upsert"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Upsert".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Upsert".into() }} /// Remove related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -435,6 +581,10 @@ pub mod remove { #[prost(message, optional, tag="2")] pub config: ::core::option::Option, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove.Request".into() }} /// Represent the multiple remove request. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -443,6 +593,10 @@ pub mod remove { #[prost(message, repeated, tag="1")] pub requests: ::prost::alloc::vec::Vec, } +impl ::prost::Name for MultiRequest { +const NAME: &'static str = "MultiRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove.MultiRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove.MultiRequest".into() }} /// Represent the remove request based on timestamp. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -452,6 +606,10 @@ pub mod remove { #[prost(message, repeated, tag="1")] pub timestamps: ::prost::alloc::vec::Vec, } +impl ::prost::Name for TimestampRequest { +const NAME: &'static str = "TimestampRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove.TimestampRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove.TimestampRequest".into() }} /// Represent the timestamp comparison. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -513,6 +671,10 @@ pub mod remove { } } } +impl ::prost::Name for Timestamp { +const NAME: &'static str = "Timestamp"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove.Timestamp".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove.Timestamp".into() }} /// Represent the remove configuration. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -524,7 +686,15 @@ pub mod remove { #[prost(int64, tag="3")] pub timestamp: i64, } +impl ::prost::Name for Config { +const NAME: &'static str = "Config"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove.Config".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove.Config".into() }} } +impl ::prost::Name for Remove { +const NAME: &'static str = "Remove"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Remove".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Remove".into() }} /// Flush related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -536,7 +706,15 @@ pub mod flush { #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Request { } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Flush.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Flush.Request".into() }} } +impl ::prost::Name for Flush { +const NAME: &'static str = "Flush"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Flush".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Flush".into() }} /// Common messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -555,6 +733,10 @@ pub mod object { #[prost(message, optional, tag="2")] pub filters: ::core::option::Option, } +impl ::prost::Name for VectorRequest { +const NAME: &'static str = "VectorRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.VectorRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.VectorRequest".into() }} /// Represent the ID and distance pair. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -566,6 +748,10 @@ pub mod object { #[prost(float, tag="2")] pub distance: f32, } +impl ::prost::Name for Distance { +const NAME: &'static str = "Distance"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Distance".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Distance".into() }} /// Represent stream response of distances. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -586,6 +772,10 @@ pub mod object { Status(super::super::super::super::google::rpc::Status), } } +impl ::prost::Name for StreamDistance { +const NAME: &'static str = "StreamDistance"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.StreamDistance".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.StreamDistance".into() }} /// Represent the vector ID. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -593,6 +783,10 @@ pub mod object { #[prost(string, tag="1")] pub id: ::prost::alloc::string::String, } +impl ::prost::Name for Id { +const NAME: &'static str = "ID"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.ID".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.ID".into() }} /// Represent multiple vector IDs. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -600,6 +794,10 @@ pub mod object { #[prost(string, repeated, tag="1")] pub ids: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } +impl ::prost::Name for IDs { +const NAME: &'static str = "IDs"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.IDs".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.IDs".into() }} /// Represent a vector. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -614,6 +812,10 @@ pub mod object { #[prost(int64, tag="3")] pub timestamp: i64, } +impl ::prost::Name for Vector { +const NAME: &'static str = "Vector"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Vector".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Vector".into() }} /// Represent a request to fetch vector meta data. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -622,6 +824,10 @@ pub mod object { #[prost(message, optional, tag="1")] pub id: ::core::option::Option, } +impl ::prost::Name for TimestampRequest { +const NAME: &'static str = "TimestampRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.TimestampRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.TimestampRequest".into() }} /// Represent a vector meta data. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -633,6 +839,10 @@ pub mod object { #[prost(int64, tag="2")] pub timestamp: i64, } +impl ::prost::Name for Timestamp { +const NAME: &'static str = "Timestamp"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Timestamp".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Timestamp".into() }} /// Represent multiple vectors. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -640,6 +850,10 @@ pub mod object { #[prost(message, repeated, tag="1")] pub vectors: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Vectors { +const NAME: &'static str = "Vectors"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Vectors".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Vectors".into() }} /// Represent stream response of the vector. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -660,6 +874,10 @@ pub mod object { Status(super::super::super::super::google::rpc::Status), } } +impl ::prost::Name for StreamVector { +const NAME: &'static str = "StreamVector"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.StreamVector".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.StreamVector".into() }} /// Represent reshape vector. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -671,6 +889,10 @@ pub mod object { #[prost(int32, repeated, tag="2")] pub shape: ::prost::alloc::vec::Vec, } +impl ::prost::Name for ReshapeVector { +const NAME: &'static str = "ReshapeVector"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.ReshapeVector".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.ReshapeVector".into() }} /// Represent the binary object. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -682,6 +904,10 @@ pub mod object { #[prost(bytes="vec", tag="2")] pub object: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Blob { +const NAME: &'static str = "Blob"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Blob".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Blob".into() }} /// Represent stream response of binary objects. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -702,6 +928,10 @@ pub mod object { Status(super::super::super::super::google::rpc::Status), } } +impl ::prost::Name for StreamBlob { +const NAME: &'static str = "StreamBlob"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.StreamBlob".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.StreamBlob".into() }} /// Represent the vector location. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -716,6 +946,10 @@ pub mod object { #[prost(string, repeated, tag="3")] pub ips: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } +impl ::prost::Name for Location { +const NAME: &'static str = "Location"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Location".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Location".into() }} /// Represent the stream response of the vector location. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -736,6 +970,10 @@ pub mod object { Status(super::super::super::super::google::rpc::Status), } } +impl ::prost::Name for StreamLocation { +const NAME: &'static str = "StreamLocation"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.StreamLocation".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.StreamLocation".into() }} /// Represent multiple vector locations. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -743,6 +981,10 @@ pub mod object { #[prost(message, repeated, tag="1")] pub locations: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Locations { +const NAME: &'static str = "Locations"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.Locations".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.Locations".into() }} /// Represent the list object vector stream request and response. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -754,6 +996,10 @@ pub mod object { #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Request { } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.List.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.List.Request".into() }} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Response { @@ -773,8 +1019,20 @@ pub mod object { Status(super::super::super::super::super::google::rpc::Status), } } - } +impl ::prost::Name for Response { +const NAME: &'static str = "Response"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.List.Response".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.List.Response".into() }} + } +impl ::prost::Name for List { +const NAME: &'static str = "List"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object.List".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object.List".into() }} } +impl ::prost::Name for Object { +const NAME: &'static str = "Object"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Object".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Object".into() }} /// Control related messages. 
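The impl ::prost::Name blocks being added throughout this file give every generated payload.v1 message a stable NAME/PACKAGE pair plus full_name() and type_url(). A minimal sketch of what that enables, assuming prost/prost-types 0.12 or newer (where prost_types::Any::from_msg and Any::to_msg are bounded on prost::Name) and that the generated modules are importable as payload::v1::...; the pack_any/unpack_location helpers are illustrative only, not part of this patch:

use prost::Name;
use prost_types::Any;

// Pack any generated message into a google.protobuf.Any; the type_url comes from
// the PACKAGE/NAME constants added by this patch (e.g. "/payload.v1.Object.Location").
fn pack_any<M: Name>(msg: &M) -> Any {
    let any = Any::from_msg(msg).expect("encoding into Any should not fail");
    debug_assert_eq!(any.type_url, M::type_url());
    any
}

// Round-trip back to the concrete type; to_msg verifies the type_url before decoding.
fn unpack_location(any: &Any) -> Result<payload::v1::object::Location, prost::DecodeError> {
    any.to_msg::<payload::v1::object::Location>()
}

The same pattern applies to every message below, including the rpc.v1 error-detail types later in this patch.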
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -790,7 +1048,15 @@ pub mod control { #[prost(uint32, tag="1")] pub pool_size: u32, } +impl ::prost::Name for CreateIndexRequest { +const NAME: &'static str = "CreateIndexRequest"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Control.CreateIndexRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Control.CreateIndexRequest".into() }} } +impl ::prost::Name for Control { +const NAME: &'static str = "Control"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Control".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Control".into() }} /// Discoverer related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -812,7 +1078,15 @@ pub mod discoverer { #[prost(string, tag="3")] pub node: ::prost::alloc::string::String, } +impl ::prost::Name for Request { +const NAME: &'static str = "Request"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Discoverer.Request".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Discoverer.Request".into() }} } +impl ::prost::Name for Discoverer { +const NAME: &'static str = "Discoverer"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Discoverer".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Discoverer".into() }} /// Info related messages. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -844,6 +1118,10 @@ pub mod info { #[prost(bool, tag="4")] pub saving: bool, } +impl ::prost::Name for Count { +const NAME: &'static str = "Count"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.Count".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.Count".into() }} /// Represent the index count for each Agents message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -858,6 +1136,10 @@ pub mod info { #[prost(uint32, tag="3")] pub live_agents: u32, } +impl ::prost::Name for Detail { +const NAME: &'static str = "Detail"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.Detail".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.Detail".into() }} /// Represent the UUID message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -872,6 +1154,10 @@ pub mod info { #[prost(string, tag="1")] pub uuid: ::prost::alloc::string::String, } +impl ::prost::Name for Committed { +const NAME: &'static str = "Committed"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.UUID.Committed".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.UUID.Committed".into() }} /// The uncommitted UUID. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -879,7 +1165,15 @@ pub mod info { #[prost(string, tag="1")] pub uuid: ::prost::alloc::string::String, } +impl ::prost::Name for Uncommitted { +const NAME: &'static str = "Uncommitted"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.UUID.Uncommitted".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.UUID.Uncommitted".into() }} } +impl ::prost::Name for Uuid { +const NAME: &'static str = "UUID"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.UUID".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.UUID".into() }} /// Represents index Statistics #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -951,6 +1245,10 @@ pub mod info { #[prost(uint64, repeated, tag="33")] pub indegree_histogram: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Statistics { +const NAME: &'static str = "Statistics"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.Statistics".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.Statistics".into() }} /// Represents index Statistics for each Agents #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -959,6 +1257,10 @@ pub mod info { #[prost(map="string, message", tag="1")] pub details: ::std::collections::HashMap<::prost::alloc::string::String, Statistics>, } +impl ::prost::Name for StatisticsDetail { +const NAME: &'static str = "StatisticsDetail"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.StatisticsDetail".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.StatisticsDetail".into() }} /// Represents index Property #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1032,6 +1334,10 @@ pub mod info { #[prost(int32, tag="34")] pub incoming_edge: i32, } +impl ::prost::Name for Property { +const NAME: &'static str = "Property"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.Property".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.Property".into() }} /// Represents index Properties for each Agents #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1039,7 +1345,15 @@ pub mod info { #[prost(map="string, message", tag="1")] pub details: ::std::collections::HashMap<::prost::alloc::string::String, Property>, } - } +impl ::prost::Name for PropertyDetail { +const NAME: &'static str = "PropertyDetail"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index.PropertyDetail".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index.PropertyDetail".into() }} + } +impl ::prost::Name for Index { +const NAME: &'static str = "Index"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Index".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Index".into() }} /// Represent the pod information message. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1066,6 +1380,10 @@ pub mod info { #[prost(message, optional, tag="7")] pub node: ::core::option::Option, } +impl ::prost::Name for Pod { +const NAME: &'static str = "Pod"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Pod".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Pod".into() }} /// Represent the node information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1089,6 +1407,10 @@ pub mod info { #[prost(message, optional, tag="6")] pub pods: ::core::option::Option, } +impl ::prost::Name for Node { +const NAME: &'static str = "Node"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Node".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Node".into() }} /// Represent the service information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1112,6 +1434,10 @@ pub mod info { #[prost(message, optional, tag="6")] pub annotations: ::core::option::Option, } +impl ::prost::Name for Service { +const NAME: &'static str = "Service"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Service".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Service".into() }} /// Represets the service port information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1123,6 +1449,10 @@ pub mod info { #[prost(int32, tag="2")] pub port: i32, } +impl ::prost::Name for ServicePort { +const NAME: &'static str = "ServicePort"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.ServicePort".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.ServicePort".into() }} /// Represent the kubernetes labels. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1130,6 +1460,10 @@ pub mod info { #[prost(map="string, string", tag="1")] pub labels: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } +impl ::prost::Name for Labels { +const NAME: &'static str = "Labels"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Labels".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Labels".into() }} /// Represent the kubernetes annotations. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1137,6 +1471,10 @@ pub mod info { #[prost(map="string, string", tag="1")] pub annotations: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } +impl ::prost::Name for Annotations { +const NAME: &'static str = "Annotations"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Annotations".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Annotations".into() }} /// Represent the CPU information message. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -1151,6 +1489,10 @@ pub mod info { #[prost(double, tag="3")] pub usage: f64, } +impl ::prost::Name for Cpu { +const NAME: &'static str = "CPU"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.CPU".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.CPU".into() }} /// Represent the memory information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -1165,6 +1507,10 @@ pub mod info { #[prost(double, tag="3")] pub usage: f64, } +impl ::prost::Name for Memory { +const NAME: &'static str = "Memory"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Memory".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Memory".into() }} /// Represent the multiple pod information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1173,6 +1519,10 @@ pub mod info { #[prost(message, repeated, tag="1")] pub pods: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Pods { +const NAME: &'static str = "Pods"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Pods".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Pods".into() }} /// Represent the multiple node information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1181,6 +1531,10 @@ pub mod info { #[prost(message, repeated, tag="1")] pub nodes: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Nodes { +const NAME: &'static str = "Nodes"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Nodes".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Nodes".into() }} /// Represent the multiple service information message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1189,6 +1543,10 @@ pub mod info { #[prost(message, repeated, tag="1")] pub services: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Services { +const NAME: &'static str = "Services"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.Services".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.Services".into() }} /// Represent the multiple IP message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1196,7 +1554,15 @@ pub mod info { #[prost(string, repeated, tag="1")] pub ip: ::prost::alloc::vec::Vec<::prost::alloc::string::String>, } +impl ::prost::Name for IPs { +const NAME: &'static str = "IPs"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info.IPs".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info.IPs".into() }} } +impl ::prost::Name for Info { +const NAME: &'static str = "Info"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Info".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Info".into() }} /// Mirror related messages. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] @@ -1215,6 +1581,10 @@ pub mod mirror { #[prost(uint32, tag="2")] pub port: u32, } +impl ::prost::Name for Target { +const NAME: &'static str = "Target"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Mirror.Target".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Mirror.Target".into() }} /// Represent the multiple Target message. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -1223,7 +1593,15 @@ pub mod mirror { #[prost(message, repeated, tag="1")] pub targets: ::prost::alloc::vec::Vec, } +impl ::prost::Name for Targets { +const NAME: &'static str = "Targets"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Mirror.Targets".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Mirror.Targets".into() }} } +impl ::prost::Name for Mirror { +const NAME: &'static str = "Mirror"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Mirror".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Mirror".into() }} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Meta { @@ -1236,12 +1614,20 @@ pub mod meta { #[prost(string, tag="1")] pub key: ::prost::alloc::string::String, } +impl ::prost::Name for Key { +const NAME: &'static str = "Key"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Meta.Key".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Meta.Key".into() }} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct Value { #[prost(message, optional, tag="1")] pub value: ::core::option::Option<::prost_types::Any>, } +impl ::prost::Name for Value { +const NAME: &'static str = "Value"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Meta.Value".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Meta.Value".into() }} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct KeyValue { @@ -1250,10 +1636,22 @@ pub mod meta { #[prost(message, optional, tag="2")] pub value: ::core::option::Option, } +impl ::prost::Name for KeyValue { +const NAME: &'static str = "KeyValue"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Meta.KeyValue".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Meta.KeyValue".into() }} } +impl ::prost::Name for Meta { +const NAME: &'static str = "Meta"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Meta".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Meta".into() }} /// Represent an empty message. 
#[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, Copy, PartialEq, ::prost::Message)] pub struct Empty { } +impl ::prost::Name for Empty { +const NAME: &'static str = "Empty"; +const PACKAGE: &'static str = "payload.v1"; +fn full_name() -> ::prost::alloc::string::String { "payload.v1.Empty".into() }fn type_url() -> ::prost::alloc::string::String { "/payload.v1.Empty".into() }} // @@protoc_insertion_point(module) diff --git a/rust/libs/proto/src/rpc.v1.rs b/rust/libs/proto/src/rpc.v1.rs index ad4ac26f0f..681730423c 100644 --- a/rust/libs/proto/src/rpc.v1.rs +++ b/rust/libs/proto/src/rpc.v1.rs @@ -42,6 +42,10 @@ pub struct ErrorInfo { #[prost(map="string, string", tag="3")] pub metadata: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::string::String>, } +impl ::prost::Name for ErrorInfo { +const NAME: &'static str = "ErrorInfo"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.ErrorInfo".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.ErrorInfo".into() }} /// Describes when the clients can retry a failed request. Clients could ignore /// the recommendation here or retry when this information is missing from error /// responses. @@ -62,6 +66,10 @@ pub struct RetryInfo { #[prost(message, optional, tag="1")] pub retry_delay: ::core::option::Option<::prost_types::Duration>, } +impl ::prost::Name for RetryInfo { +const NAME: &'static str = "RetryInfo"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.RetryInfo".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.RetryInfo".into() }} /// Describes additional debugging info. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -73,6 +81,10 @@ pub struct DebugInfo { #[prost(string, tag="2")] pub detail: ::prost::alloc::string::String, } +impl ::prost::Name for DebugInfo { +const NAME: &'static str = "DebugInfo"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.DebugInfo".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.DebugInfo".into() }} /// Describes how a quota check failed. /// /// For example if a daily limit was exceeded for the calling project, @@ -113,7 +125,15 @@ pub mod quota_failure { #[prost(string, tag="2")] pub description: ::prost::alloc::string::String, } +impl ::prost::Name for Violation { +const NAME: &'static str = "Violation"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.QuotaFailure.Violation".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.QuotaFailure.Violation".into() }} } +impl ::prost::Name for QuotaFailure { +const NAME: &'static str = "QuotaFailure"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.QuotaFailure".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.QuotaFailure".into() }} /// Describes what preconditions have failed. 
/// /// For example, if an RPC failed because it required the Terms of Service to be @@ -149,7 +169,15 @@ pub mod precondition_failure { #[prost(string, tag="3")] pub description: ::prost::alloc::string::String, } +impl ::prost::Name for Violation { +const NAME: &'static str = "Violation"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.PreconditionFailure.Violation".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.PreconditionFailure.Violation".into() }} } +impl ::prost::Name for PreconditionFailure { +const NAME: &'static str = "PreconditionFailure"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.PreconditionFailure".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.PreconditionFailure".into() }} /// Describes violations in a client request. This error type focuses on the /// syntactic aspects of the request. #[allow(clippy::derive_partial_eq_without_eq)] @@ -208,7 +236,15 @@ pub mod bad_request { #[prost(string, tag="2")] pub description: ::prost::alloc::string::String, } +impl ::prost::Name for FieldViolation { +const NAME: &'static str = "FieldViolation"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.BadRequest.FieldViolation".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.BadRequest.FieldViolation".into() }} } +impl ::prost::Name for BadRequest { +const NAME: &'static str = "BadRequest"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.BadRequest".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.BadRequest".into() }} /// Contains metadata about the request that clients can attach when filing a bug /// or providing other forms of feedback. #[allow(clippy::derive_partial_eq_without_eq)] @@ -223,6 +259,10 @@ pub struct RequestInfo { #[prost(string, tag="2")] pub serving_data: ::prost::alloc::string::String, } +impl ::prost::Name for RequestInfo { +const NAME: &'static str = "RequestInfo"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.RequestInfo".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.RequestInfo".into() }} /// Describes the resource that is being accessed. #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -249,6 +289,10 @@ pub struct ResourceInfo { #[prost(string, tag="4")] pub description: ::prost::alloc::string::String, } +impl ::prost::Name for ResourceInfo { +const NAME: &'static str = "ResourceInfo"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.ResourceInfo".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.ResourceInfo".into() }} /// Provides links to documentation or for performing an out of band action. 
/// /// For example, if a quota check failed with an error indicating the calling @@ -274,7 +318,15 @@ pub mod help { #[prost(string, tag="2")] pub url: ::prost::alloc::string::String, } +impl ::prost::Name for Link { +const NAME: &'static str = "Link"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.Help.Link".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.Help.Link".into() }} } +impl ::prost::Name for Help { +const NAME: &'static str = "Help"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.Help".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.Help".into() }} /// Provides a localized error message that is safe to return to the user /// which can be attached to an RPC error. #[allow(clippy::derive_partial_eq_without_eq)] @@ -289,4 +341,8 @@ pub struct LocalizedMessage { #[prost(string, tag="2")] pub message: ::prost::alloc::string::String, } +impl ::prost::Name for LocalizedMessage { +const NAME: &'static str = "LocalizedMessage"; +const PACKAGE: &'static str = "rpc.v1"; +fn full_name() -> ::prost::alloc::string::String { "rpc.v1.LocalizedMessage".into() }fn type_url() -> ::prost::alloc::string::String { "/rpc.v1.LocalizedMessage".into() }} // @@protoc_insertion_point(module) diff --git a/rust/libs/proto/src/vald.v1.tonic.rs b/rust/libs/proto/src/vald.v1.tonic.rs index af0114a23e..d7160aa304 100644 --- a/rust/libs/proto/src/vald.v1.tonic.rs +++ b/rust/libs/proto/src/vald.v1.tonic.rs @@ -5415,6 +5415,35 @@ pub mod update_client { .insert(GrpcMethod::new("vald.v1.Update", "MultiUpdate")); self.inner.unary(req, path, codec).await } + /** A method to update timestamp an indexed vector. +*/ + pub async fn update_timestamp( + &mut self, + request: impl tonic::IntoRequest< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/vald.v1.Update/UpdateTimestamp", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("vald.v1.Update", "UpdateTimestamp")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -5462,6 +5491,17 @@ pub mod update_server { tonic::Response, tonic::Status, >; + /** A method to update timestamp an indexed vector. 
+*/ + async fn update_timestamp( + &self, + request: tonic::Request< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct UpdateServer { @@ -5686,6 +5726,54 @@ pub mod update_server { }; Box::pin(fut) } + "/vald.v1.Update/UpdateTimestamp" => { + #[allow(non_camel_case_types)] + struct UpdateTimestampSvc(pub Arc); + impl< + T: Update, + > tonic::server::UnaryService< + super::super::super::payload::v1::update::TimestampRequest, + > for UpdateTimestampSvc { + type Response = super::super::super::payload::v1::object::Location; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::super::payload::v1::update::TimestampRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::update_timestamp(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = UpdateTimestampSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( diff --git a/rust/rust-toolchain b/rust/rust-toolchain index aaceec04e0..6b4de0a42b 100644 --- a/rust/rust-toolchain +++ b/rust/rust-toolchain @@ -1 +1 @@ -1.80.0 +1.83.0 diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml index 772b741528..b8d04fd921 100644 --- a/rust/rust-toolchain.toml +++ b/rust/rust-toolchain.toml @@ -14,4 +14,4 @@ # limitations under the License. # [toolchain] -channel = "1.80.0" +channel = "1.83.0" diff --git a/tests/chaos/chart/README.md b/tests/chaos/chart/README.md index a142fc4136..14acc560ca 100644 --- a/tests/chaos/chart/README.md +++ b/tests/chaos/chart/README.md @@ -41,4 +41,4 @@ A Helm chart for testing Vald using Chaos Mesh. 
--- -Autogenerated from chart metadata using [helm-docs v1.13.1](https://github.com/norwoodj/helm-docs/releases/v1.13.1) +Autogenerated from chart metadata using [helm-docs v1.14.2](https://github.com/norwoodj/helm-docs/releases/v1.14.2) diff --git a/tests/e2e/crud/crud_faiss_test.go b/tests/e2e/crud/crud_faiss_test.go index c0a193975f..0ba526bb26 100644 --- a/tests/e2e/crud/crud_faiss_test.go +++ b/tests/e2e/crud/crud_faiss_test.go @@ -190,7 +190,7 @@ func TestE2EUpdateOnly(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Update #1: %s", err) + return errors.Errorf("the returned status is not NotFound on Update #1: %s,\tcode: %s", msg, status) } t.Logf("received a NotFound error on #1: %s", msg) @@ -300,7 +300,7 @@ func TestE2EStandardCRUD(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Update #1: %s", err) + return errors.Errorf("the returned status is not NotFound on Update #1: %s,\tcode: %s", msg, status) } t.Logf("received a NotFound error on #1: %s", msg) diff --git a/tests/e2e/crud/crud_test.go b/tests/e2e/crud/crud_test.go index 0805c91f4b..bfda87cdd9 100644 --- a/tests/e2e/crud/crud_test.go +++ b/tests/e2e/crud/crud_test.go @@ -25,6 +25,7 @@ import ( "fmt" "os" "os/exec" + "strings" "testing" "time" @@ -415,7 +416,7 @@ func TestE2EStandardCRUD(t *testing.T) { err = op.Flush(t, ctx) if err != nil { - // TODO: Remove code check afeter Flush API is available for agent-faiss and mirror-gateway + // TODO: Remove code check after Flush API is available for agent-faiss and mirror-gateway st, _, _ := status.ParseError(err, codes.Unknown, "") if st.Code() != codes.Unimplemented { t.Fatalf("an error occurred: %s", err) @@ -445,7 +446,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Update #1: %s", err) + return errors.Errorf("the returned status is not NotFound on Update #1: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received a NotFound error on #1: %s", msg) @@ -480,7 +481,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Update #2: %s", err) + return errors.Errorf("the returned status is not NotFound on Update #2: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received a NotFound error on #2: %s", msg) @@ -519,6 +520,8 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Fatalf("an error occurred on #3: %s", err) } + sleep(t, waitAfterInsertDuration) + // #4 run Update with SkipStrictExistCheck=false & a different vector, and check that it succeeds err = op.UpdateWithParameters( t, @@ -535,6 +538,8 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Fatalf("an error occurred on #4: %s", err) } + sleep(t, waitAfterInsertDuration) + // #5 run Update with SkipStrictExistCheck=false & same vector as 4 and check that AlreadyExists returns err = op.UpdateWithParameters( t, @@ -548,7 +553,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.AlreadyExists) { - return errors.Errorf("the returned status is not NotFound on Update #5: %s", err) + return errors.Errorf("the returned status is not AlreadyExists on Update #5: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received an AlreadyExists error on #5: %s", 
msg) @@ -569,7 +574,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { }, ) if err != nil { - t.Fatalf("an error occurred: %s", err) + t.Fatalf("an error occurred on #5: %s", err) } // #6 run Update with the same vector as SkipStrictExistCheck=true & 4 and check that it succeeds @@ -615,7 +620,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Remove #8: %s", err) + return errors.Errorf("the returned status is not NotFound on Remove #8: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received a NotFound error on #8: %s", msg) @@ -651,7 +656,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.NotFound) { - return errors.Errorf("the returned status is not NotFound on Remove #9: %s", err) + return errors.Errorf("the returned status is not NotFound on Remove #9: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received a NotFound error on #9: %s", msg) @@ -702,7 +707,7 @@ func TestE2ECRUDWithSkipStrictExistCheck(t *testing.T) { t.Helper() if status != int32(codes.AlreadyExists) { - return errors.Errorf("the returned status is not AlreadyExists on Upsert #11: %s", err) + return errors.Errorf("the returned status is not AlreadyExists on Upsert #11: %s,\tcode: %s", msg, codes.ToString(status)) } t.Logf("received an AlreadyExists error on #11: %s", msg) @@ -798,8 +803,29 @@ func TestE2EIndexJobCorrection(t *testing.T) { } t.Log("Test case 2: execute index correction after one agent removed") - t.Log("removing vald-agent-0...") - cmd := exec.CommandContext(ctx, "sh", "-c", "kubectl delete pod vald-agent-0 && kubectl wait --for=condition=Ready pod/vald-agent-0") + detail, err := op.IndexDetail(t, ctx) + if err != nil { + t.Fatalf("an error occurred: %s", err) + } + if len(detail.Counts) == 0 { + t.Fatal("no pods found with index details") + } + var target string + for a, c := range detail.Counts { + if c.Stored > 0 { + parts := strings.Split(a, ":") + if len(parts) == 0 { + t.Fatalf("invalid address format: %s", a) + } + target = parts[0] + break + } + } + if target == "" { + t.Fatal("no pods found with stored count > 0") + } + + cmd := exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("kubectl get pods -o custom-columns=:metadata.name --no-headers=true --field-selector=\"status.podIP=%s\"", target)) out, err := cmd.Output() if err != nil { if exitErr, ok := err.(*exec.ExitError); ok { @@ -808,6 +834,18 @@ func TestE2EIndexJobCorrection(t *testing.T) { t.Fatalf("unexpected error on creating job: %v", err) } } + agent := strings.TrimRight(string(out), "\n") + + t.Logf("removing %s...", agent) + cmd = exec.CommandContext(ctx, "sh", "-c", fmt.Sprintf("kubectl delete pod %s && kubectl wait --for=condition=Ready pod/%s", agent, agent)) + out, err = cmd.Output() + if err != nil { + if exitErr, ok := err.(*exec.ExitError); ok { + t.Fatalf("%s, %s, %v", string(out), string(exitErr.Stderr), err) + } else { + t.Fatalf("unexpected error on creating job: %v", err) + } + } t.Log(string(out)) // correct the deleted index @@ -865,7 +903,7 @@ func TestE2EReadReplica(t *testing.T) { t.Log("waiting for read replica rotator jobs to complete...") if err := kubectl.WaitResources(ctx, t, "job", "app=vald-readreplica-rotate", "complete", "60s"); err != nil { t.Log("wait failed. 
printing yaml of vald-readreplica-rotate") - kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-oyaml") + kubectl.KubectlCmd(ctx, t, "get", "pod", "-l", "app=vald-readreplica-rotate", "-o", "yaml") t.Log("wait failed. printing log of vald-index-operator") kubectl.DebugLog(ctx, t, "app=vald-index-operator") t.Log("wait failed. printing log of vald-readreplica-rotate") diff --git a/tests/e2e/operation/multi.go b/tests/e2e/operation/multi.go index 5baaa6ff95..5ca5784921 100644 --- a/tests/e2e/operation/multi.go +++ b/tests/e2e/operation/multi.go @@ -24,7 +24,7 @@ import ( ) func (c *client) MultiSearch(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -60,7 +60,7 @@ func (c *client) MultiSearch(t *testing.T, ctx context.Context, ds Dataset) erro } func (c *client) MultiSearchByID(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -96,7 +96,7 @@ func (c *client) MultiSearchByID(t *testing.T, ctx context.Context, ds Dataset) } func (c *client) MultiLinearSearch(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -130,7 +130,7 @@ func (c *client) MultiLinearSearch(t *testing.T, ctx context.Context, ds Dataset } func (c *client) MultiLinearSearchByID(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -164,7 +164,7 @@ func (c *client) MultiLinearSearchByID(t *testing.T, ctx context.Context, ds Dat } func (c *client) MultiInsert(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -201,7 +201,7 @@ func (c *client) MultiInsert(t *testing.T, ctx context.Context, ds Dataset) erro } func (c *client) MultiUpdate(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -241,7 +241,7 @@ func (c *client) MultiUpdate(t *testing.T, ctx context.Context, ds Dataset) erro } func (c *client) MultiUpsert(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -281,7 +281,7 @@ func (c *client) MultiUpsert(t *testing.T, ctx context.Context, ds Dataset) erro } func (c *client) MultiRemove(t *testing.T, ctx context.Context, ds Dataset) error { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } diff --git a/tests/e2e/operation/operation.go b/tests/e2e/operation/operation.go index 0b54ec439e..f8d8ae2027 100644 --- a/tests/e2e/operation/operation.go +++ b/tests/e2e/operation/operation.go @@ -133,6 +133,7 @@ type Client interface { CreateIndex(t *testing.T, ctx context.Context) error SaveIndex(t *testing.T, ctx context.Context) error IndexInfo(t *testing.T, ctx context.Context) (*payload.Info_Index_Count, error) + IndexDetail(t *testing.T, ctx context.Context) (*payload.Info_Index_Detail, error) } type client struct { @@ -150,7 +151,7 @@ func New(host string, port int) (Client, error) { } func (c *client) CreateIndex(t *testing.T, ctx context.Context) error { - client, err := c.getAgentClient(ctx) + client, err := c.getAgentClient() if err != nil { return err } @@ 
-163,7 +164,7 @@ func (c *client) CreateIndex(t *testing.T, ctx context.Context) error { } func (c *client) SaveIndex(t *testing.T, ctx context.Context) error { - client, err := c.getAgentClient(ctx) + client, err := c.getAgentClient() if err != nil { return err } @@ -174,7 +175,7 @@ func (c *client) SaveIndex(t *testing.T, ctx context.Context) error { } func (c *client) IndexInfo(t *testing.T, ctx context.Context) (*payload.Info_Index_Count, error) { - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return nil, err } @@ -182,9 +183,19 @@ func (c *client) IndexInfo(t *testing.T, ctx context.Context) (*payload.Info_Ind return client.IndexInfo(ctx, &payload.Empty{}) } -func (c *client) getGRPCConn(ctx context.Context) (*grpc.ClientConn, error) { - return grpc.DialContext( - ctx, +func (c *client) IndexDetail( + t *testing.T, ctx context.Context, +) (*payload.Info_Index_Detail, error) { + client, err := c.getClient() + if err != nil { + return nil, err + } + + return client.IndexDetail(ctx, &payload.Empty{}) +} + +func (c *client) getGRPCConn() (*grpc.ClientConn, error) { + return grpc.NewClient( c.host+":"+strconv.Itoa(c.port), grpc.WithInsecure(), grpc.WithKeepaliveParams( @@ -197,8 +208,8 @@ func (c *client) getGRPCConn(ctx context.Context) (*grpc.ClientConn, error) { ) } -func (c *client) getClient(ctx context.Context) (vald.Client, error) { - conn, err := c.getGRPCConn(ctx) +func (c *client) getClient() (vald.Client, error) { + conn, err := c.getGRPCConn() if err != nil { return nil, err } @@ -206,8 +217,8 @@ func (c *client) getClient(ctx context.Context) (vald.Client, error) { return vald.NewValdClient(conn), nil } -func (c *client) getAgentClient(ctx context.Context) (core.AgentClient, error) { - conn, err := c.getGRPCConn(ctx) +func (c *client) getAgentClient() (core.AgentClient, error) { + conn, err := c.getGRPCConn() if err != nil { return nil, err } diff --git a/tests/e2e/operation/stream.go b/tests/e2e/operation/stream.go index 141a9bcca4..757f8e8e61 100644 --- a/tests/e2e/operation/stream.go +++ b/tests/e2e/operation/stream.go @@ -93,7 +93,7 @@ func (c *client) SearchWithParameters( ) (rerr error) { t.Log("search operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -293,7 +293,7 @@ func (c *client) SearchByIDWithParameters( ) (rerr error) { t.Log("searchByID operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -413,7 +413,7 @@ func (c *client) LinearSearchWithParameters( ) (rerr error) { t.Log("linearsearch operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -531,7 +531,7 @@ func (c *client) LinearSearchByIDWithParameters( ) (rerr error) { t.Log("linearsearchByID operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -637,7 +637,7 @@ func (c *client) InsertWithParameters( ) (rerr error) { t.Log("insert operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -740,7 +740,7 @@ func (c *client) UpdateWithParameters( ) (rerr error) { t.Log("update operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -776,13 +776,13 @@ func (c *client) UpdateWithParameters( loc := res.GetLocation() if loc == nil { - status := res.GetStatus() - if status != nil { - if e := svalidator(t, 
status.GetCode(), status.GetMessage()); e != nil { + st := res.GetStatus() + if st != nil { + if e := svalidator(t, st.GetCode(), st.GetMessage()); e != nil { t.Errorf("an error returned:\tcode: %d\tmessage: %s\tdetails: %s", - status.GetCode(), - status.GetMessage(), - errdetails.Serialize(status.GetDetails())) + st.GetCode(), + st.GetMessage(), + errdetails.Serialize(st.GetDetails())) rerr = errors.Join(rerr, e) } continue @@ -844,7 +844,7 @@ func (c *client) UpsertWithParameters( ) (rerr error) { t.Log("upsert operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -946,7 +946,7 @@ func (c *client) RemoveWithParameters( ) (rerr error) { t.Log("remove operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -1029,7 +1029,7 @@ func (c *client) RemoveWithParameters( func (c *client) Flush(t *testing.T, ctx context.Context) error { t.Log("flush operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -1047,7 +1047,7 @@ func (c *client) Flush(t *testing.T, ctx context.Context) error { func (c *client) RemoveByTimestamp(t *testing.T, ctx context.Context, timestamp int64) error { t.Log("removeByTimestamp operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -1074,7 +1074,7 @@ func (c *client) RemoveByTimestamp(t *testing.T, ctx context.Context, timestamp func (c *client) Exists(t *testing.T, ctx context.Context, id string) error { t.Log("exists operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -1096,7 +1096,7 @@ func (c *client) Exists(t *testing.T, ctx context.Context, id string) error { func (c *client) GetObject(t *testing.T, ctx context.Context, ds Dataset) (rerr error) { t.Log("getObject operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } @@ -1186,7 +1186,7 @@ func (c *client) GetObject(t *testing.T, ctx context.Context, ds Dataset) (rerr func (c *client) StreamListObject(t *testing.T, ctx context.Context, ds Dataset) error { t.Log("StreamListObject operation started") - client, err := c.getClient(ctx) + client, err := c.getClient() if err != nil { return err } diff --git a/tests/e2e/performance/max_vector_dim_test.go b/tests/e2e/performance/max_vector_dim_test.go index 5e0016e4e6..560139b2d2 100644 --- a/tests/e2e/performance/max_vector_dim_test.go +++ b/tests/e2e/performance/max_vector_dim_test.go @@ -125,9 +125,7 @@ func TestE2EInsertOnlyWithOneVectorAndSearch(t *testing.T) { if dim > algorithm.MaximumVectorDimensionSize { t.Fatalf("Invalid argument: dimension should be equal or under than " + strconv.Itoa(algorithm.MaximumVectorDimensionSize) + ". 
set dim was " + strconv.Itoa(dim)) } - ctx := context.Background() - conn, err := grpc.DialContext( - ctx, + conn, err := grpc.NewClient( net.JoinHostPort(host, uint16(port)), grpc.WithInsecure(), grpc.WithKeepaliveParams( @@ -155,6 +153,7 @@ func TestE2EInsertOnlyWithOneVectorAndSearch(t *testing.T) { SkipStrictExistCheck: false, }, } + ctx := context.Background() _, err = cli.Insert(ctx, req) if err != nil { st, _ := status.FromError(err) diff --git a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go index fa857a69cd..4aab0fae3b 100644 --- a/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go +++ b/tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go @@ -147,7 +147,7 @@ func Test_ngt_parallel_delete_and_insert(t *testing.T) { wg.Wait() if n.Len() != maxIDNum { - t.Errorf("inerted id num = %d, want = %d", n.Len(), maxIDNum) + t.Errorf("inserted id num = %d, want = %d", n.Len(), maxIDNum) } for i := int64(0); i < maxIDNum; i++ { @@ -230,7 +230,7 @@ func Test_ngt_parallel_insert_and_delete(t *testing.T) { wg.Wait() if want, got := n.Len(), uint64(0); want != got { - t.Errorf("inerted id num = %d, want = %d", got, want) + t.Errorf("inserted id num = %d, want = %d", got, want) } for i := int64(0); i < maxIDNum; i++ { diff --git a/tests/performance/max_vector_dim_test.go b/tests/performance/max_vector_dim_test.go index ee9bde7ea0..6605eaa856 100644 --- a/tests/performance/max_vector_dim_test.go +++ b/tests/performance/max_vector_dim_test.go @@ -68,17 +68,15 @@ func init_ngt_service(dim int) (service.NGT, error) { } func parse(raw string) (key string, value int) { - text := strings.ReplaceAll(raw[:len(raw)-2], " ", "") - keyValue := strings.Split(text, ":") - val := 0 - if keyValue[1] != "" { - val, err := strconv.Atoi(keyValue[1]) + k, v, ok := strings.Cut(strings.ReplaceAll(raw[:len(raw)-2], " ", ""), ":") + if ok { + val, err := strconv.Atoi(v) if err != nil { - panic(err) + return k, 0 } - return keyValue[0], val + return k, val } - return keyValue[0], val + return k, 0 } func TestMain(m *testing.M) { diff --git a/versions/BUF_VERSION b/versions/BUF_VERSION index 1a7337ae28..7fa3401a10 100644 --- a/versions/BUF_VERSION +++ b/versions/BUF_VERSION @@ -1 +1 @@ -v1.28.1 +v1.47.2 diff --git a/versions/CHAOS_MESH_VERSION b/versions/CHAOS_MESH_VERSION index ec1cf33c3f..24ba9a38de 100644 --- a/versions/CHAOS_MESH_VERSION +++ b/versions/CHAOS_MESH_VERSION @@ -1 +1 @@ -2.6.3 +2.7.0 diff --git a/versions/CMAKE_VERSION b/versions/CMAKE_VERSION index aaa0fde70b..4464a71f41 100644 --- a/versions/CMAKE_VERSION +++ b/versions/CMAKE_VERSION @@ -1 +1 @@ -3.30.2 +3.31.2 diff --git a/versions/DOCKER_VERSION b/versions/DOCKER_VERSION index 96e099462d..b33e0ab2ce 100644 --- a/versions/DOCKER_VERSION +++ b/versions/DOCKER_VERSION @@ -1 +1 @@ -v27.1.1 +v27.4.0 diff --git a/versions/FAISS_VERSION b/versions/FAISS_VERSION index 27f9cd322b..f8e233b273 100644 --- a/versions/FAISS_VERSION +++ b/versions/FAISS_VERSION @@ -1 +1 @@ -1.8.0 +1.9.0 diff --git a/versions/GOLANGCILINT_VERSION b/versions/GOLANGCILINT_VERSION index be33d89791..1ba6c01679 100644 --- a/versions/GOLANGCILINT_VERSION +++ b/versions/GOLANGCILINT_VERSION @@ -1 +1 @@ -v1.59.1 +v1.62.2 diff --git a/versions/GO_VERSION b/versions/GO_VERSION index a6c2798a48..27ddcc14da 100644 --- a/versions/GO_VERSION +++ b/versions/GO_VERSION @@ -1 +1 @@ -1.23.0 +1.23.4 diff --git a/versions/HDF5_VERSION b/versions/HDF5_VERSION index 85b169d0cb..2248db12d7 100644 --- a/versions/HDF5_VERSION +++ 
b/versions/HDF5_VERSION @@ -1 +1 @@ -hdf5_1.14.4.3 +hdf5_1.14.5 diff --git a/versions/HELM_VERSION b/versions/HELM_VERSION index de3e42fc29..fb201f7029 100644 --- a/versions/HELM_VERSION +++ b/versions/HELM_VERSION @@ -1 +1 @@ -v3.15.3 +v3.16.3 diff --git a/versions/K3S_VERSION b/versions/K3S_VERSION index e777e8936c..89b55cc8ed 100644 --- a/versions/K3S_VERSION +++ b/versions/K3S_VERSION @@ -1 +1 @@ -v1.30.2-k3s2 +v1.31.3-k3s1 diff --git a/versions/KIND_VERSION b/versions/KIND_VERSION index ca222b7cf3..d21d277be5 100644 --- a/versions/KIND_VERSION +++ b/versions/KIND_VERSION @@ -1 +1 @@ -0.23.0 +0.25.0 diff --git a/versions/KUBECTL_VERSION b/versions/KUBECTL_VERSION index 062a7525f1..fbcee7ecad 100644 --- a/versions/KUBECTL_VERSION +++ b/versions/KUBECTL_VERSION @@ -1 +1 @@ -v1.30.3 \ No newline at end of file +v1.32.0 \ No newline at end of file diff --git a/versions/KUBELINTER_VERSION b/versions/KUBELINTER_VERSION index ca46cd2885..63f2359f64 100644 --- a/versions/KUBELINTER_VERSION +++ b/versions/KUBELINTER_VERSION @@ -1 +1 @@ -v0.6.8 +v0.7.1 diff --git a/versions/NGT_VERSION b/versions/NGT_VERSION index 530cdd91a2..cc6c9a491e 100644 --- a/versions/NGT_VERSION +++ b/versions/NGT_VERSION @@ -1 +1 @@ -2.2.4 +2.3.5 diff --git a/versions/OPERATOR_SDK_VERSION b/versions/OPERATOR_SDK_VERSION index 2f2ce0df61..3b9c65a4f5 100644 --- a/versions/OPERATOR_SDK_VERSION +++ b/versions/OPERATOR_SDK_VERSION @@ -1 +1 @@ -v1.33 +v1.38 diff --git a/versions/PROMETHEUS_STACK_VERSION b/versions/PROMETHEUS_STACK_VERSION index 14e3460225..246ba14e4c 100644 --- a/versions/PROMETHEUS_STACK_VERSION +++ b/versions/PROMETHEUS_STACK_VERSION @@ -1 +1 @@ -61.7.1 +66.4.0 diff --git a/versions/PROTOBUF_VERSION b/versions/PROTOBUF_VERSION index 383283e322..f598853579 100644 --- a/versions/PROTOBUF_VERSION +++ b/versions/PROTOBUF_VERSION @@ -1 +1 @@ -27.3 +29.1 diff --git a/versions/REVIEWDOG_VERSION b/versions/REVIEWDOG_VERSION index 2c80271d5a..0c5f589f95 100644 --- a/versions/REVIEWDOG_VERSION +++ b/versions/REVIEWDOG_VERSION @@ -1 +1 @@ -v0.20.1 +v0.20.3 diff --git a/versions/RUST_VERSION b/versions/RUST_VERSION index aaceec04e0..6b4de0a42b 100644 --- a/versions/RUST_VERSION +++ b/versions/RUST_VERSION @@ -1 +1 @@ -1.80.0 +1.83.0 diff --git a/versions/TELEPRESENCE_VERSION b/versions/TELEPRESENCE_VERSION index b8e248f40b..4b0fa5fdcc 100644 --- a/versions/TELEPRESENCE_VERSION +++ b/versions/TELEPRESENCE_VERSION @@ -1 +1 @@ -2.19.1 +2.20.3 diff --git a/versions/USEARCH_VERSION b/versions/USEARCH_VERSION new file mode 100644 index 0000000000..3c5d0106bb --- /dev/null +++ b/versions/USEARCH_VERSION @@ -0,0 +1 @@ +2.16.6 diff --git a/versions/VALD_VERSION b/versions/VALD_VERSION index fd48ae7f98..f8f3bcf53b 100644 --- a/versions/VALD_VERSION +++ b/versions/VALD_VERSION @@ -1 +1 @@ -v1.7.13 +v1.7.16 diff --git a/versions/YQ_VERSION b/versions/YQ_VERSION index bfded4ec17..3d6c7a0cb0 100644 --- a/versions/YQ_VERSION +++ b/versions/YQ_VERSION @@ -1 +1 @@ -v4.44.3 +v4.44.6 diff --git a/versions/actions/ACTIONS_CACHE b/versions/actions/ACTIONS_CACHE index 4d54daddb6..6aba2b245a 100644 --- a/versions/actions/ACTIONS_CACHE +++ b/versions/actions/ACTIONS_CACHE @@ -1 +1 @@ -4.0.2 +4.2.0 diff --git a/versions/actions/ACTIONS_CHECKOUT b/versions/actions/ACTIONS_CHECKOUT index 9edf2a44f4..af8c8ec7c1 100644 --- a/versions/actions/ACTIONS_CHECKOUT +++ b/versions/actions/ACTIONS_CHECKOUT @@ -1 +1 @@ -4.1.7 +4.2.2 diff --git a/versions/actions/ACTIONS_SETUP_GO b/versions/actions/ACTIONS_SETUP_GO index a1ef0cae18..91ff57278e 100644 --- 
a/versions/actions/ACTIONS_SETUP_GO +++ b/versions/actions/ACTIONS_SETUP_GO @@ -1 +1 @@ -5.0.2 +5.2.0 diff --git a/versions/actions/ACTIONS_SETUP_NODE b/versions/actions/ACTIONS_SETUP_NODE index c4e41f9459..ee74734aa2 100644 --- a/versions/actions/ACTIONS_SETUP_NODE +++ b/versions/actions/ACTIONS_SETUP_NODE @@ -1 +1 @@ -4.0.3 +4.1.0 diff --git a/versions/actions/ACTIONS_UPLOAD_ARTIFACT b/versions/actions/ACTIONS_UPLOAD_ARTIFACT index 43270543f7..9e3a93350d 100644 --- a/versions/actions/ACTIONS_UPLOAD_ARTIFACT +++ b/versions/actions/ACTIONS_UPLOAD_ARTIFACT @@ -1 +1 @@ -4.3.6 +4.4.3 diff --git a/versions/actions/CODECOV_CODECOV_ACTION b/versions/actions/CODECOV_CODECOV_ACTION index a84947d6ff..ac14c3dfaa 100644 --- a/versions/actions/CODECOV_CODECOV_ACTION +++ b/versions/actions/CODECOV_CODECOV_ACTION @@ -1 +1 @@ -4.5.0 +5.1.1 diff --git a/versions/actions/CRAZY_MAX_GHACTION_IMPORT_GPG b/versions/actions/CRAZY_MAX_GHACTION_IMPORT_GPG index dfda3e0b4f..6abaeb2f90 100644 --- a/versions/actions/CRAZY_MAX_GHACTION_IMPORT_GPG +++ b/versions/actions/CRAZY_MAX_GHACTION_IMPORT_GPG @@ -1 +1 @@ -6.1.0 +6.2.0 diff --git a/versions/actions/DOCKER_SETUP_BUILDX_ACTION b/versions/actions/DOCKER_SETUP_BUILDX_ACTION index 9575d51bad..a76ccff2a6 100644 --- a/versions/actions/DOCKER_SETUP_BUILDX_ACTION +++ b/versions/actions/DOCKER_SETUP_BUILDX_ACTION @@ -1 +1 @@ -3.6.1 +3.7.1 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE b/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE index a36e9b0906..7329e21c3b 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE +++ b/versions/actions/GITHUB_CODEQL_ACTION_ANALYZE @@ -1 +1 @@ -2.18.1 +2.20.0 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD b/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD index a36e9b0906..7329e21c3b 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD +++ b/versions/actions/GITHUB_CODEQL_ACTION_AUTOBUILD @@ -1 +1 @@ -2.18.1 +2.20.0 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_INIT b/versions/actions/GITHUB_CODEQL_ACTION_INIT index a36e9b0906..7329e21c3b 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_INIT +++ b/versions/actions/GITHUB_CODEQL_ACTION_INIT @@ -1 +1 @@ -2.18.1 +2.20.0 diff --git a/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF b/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF index a36e9b0906..7329e21c3b 100644 --- a/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF +++ b/versions/actions/GITHUB_CODEQL_ACTION_UPLOAD_SARIF @@ -1 +1 @@ -2.18.1 +2.20.0 diff --git a/versions/actions/GITHUB_ISSUE_METRICS b/versions/actions/GITHUB_ISSUE_METRICS index 19811903a7..1eeac129c5 100644 --- a/versions/actions/GITHUB_ISSUE_METRICS +++ b/versions/actions/GITHUB_ISSUE_METRICS @@ -1 +1 @@ -3.8.0 +3.16.0 diff --git a/versions/actions/PETER_EVANS_CREATE_ISSUE_FROM_FILE b/versions/actions/PETER_EVANS_CREATE_ISSUE_FROM_FILE index 0062ac9718..6b244dcd69 100644 --- a/versions/actions/PETER_EVANS_CREATE_ISSUE_FROM_FILE +++ b/versions/actions/PETER_EVANS_CREATE_ISSUE_FROM_FILE @@ -1 +1 @@ -5.0.0 +5.0.1 diff --git a/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST b/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST index dfda3e0b4f..2be8aeb6b1 100644 --- a/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST +++ b/versions/actions/PETER_EVANS_CREATE_PULL_REQUEST @@ -1 +1 @@ -6.1.0 +7.0.5 diff --git a/versions/actions/REVIEWDOG_ACTION_HADOLINT b/versions/actions/REVIEWDOG_ACTION_HADOLINT index 50aceaa7b7..5525f03fa6 100644 --- a/versions/actions/REVIEWDOG_ACTION_HADOLINT +++ b/versions/actions/REVIEWDOG_ACTION_HADOLINT 
@@ -1 +1 @@ -1.45.0 +1.48.1 diff --git a/versions/actions/REVIEWDOG_ACTION_LANGUAGETOOL b/versions/actions/REVIEWDOG_ACTION_LANGUAGETOOL index 815d5ca06d..3989355915 100644 --- a/versions/actions/REVIEWDOG_ACTION_LANGUAGETOOL +++ b/versions/actions/REVIEWDOG_ACTION_LANGUAGETOOL @@ -1 +1 @@ -1.19.0 +1.20.0 diff --git a/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET b/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET index 6a126f402d..84298f96d7 100644 --- a/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET +++ b/versions/actions/SHOGO82148_ACTIONS_UPLOAD_RELEASE_ASSET @@ -1 +1 @@ -1.7.5 +1.7.8 diff --git a/versions/actions/SOFTPROPS_ACTION_GH_RELEASE b/versions/actions/SOFTPROPS_ACTION_GH_RELEASE index 815e68dd20..ccbccc3dc6 100644 --- a/versions/actions/SOFTPROPS_ACTION_GH_RELEASE +++ b/versions/actions/SOFTPROPS_ACTION_GH_RELEASE @@ -1 +1 @@ -2.0.8 +2.2.0
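The tonic stubs earlier in this patch also gain an UpdateTimestamp unary RPC at /vald.v1.Update/UpdateTimestamp. A minimal client-side sketch, assuming tonic's transport feature is enabled, that the generated client is reachable as vald::v1::update_client::UpdateClient, and that payload.v1.Update.TimestampRequest exposes id and timestamp fields as in Vald's update.proto; the endpoint and values are placeholders:

use std::error::Error;

async fn update_timestamp_example() -> Result<(), Box<dyn Error>> {
    // assumed endpoint; point this at a gateway or agent serving vald.v1.Update
    let mut client =
        vald::v1::update_client::UpdateClient::connect("http://localhost:8081").await?;
    let req = payload::v1::update::TimestampRequest {
        id: "sample-id".to_string(), // assumed field: target vector ID
        timestamp: 1_700_000_000,    // assumed field: new timestamp value
        ..Default::default()
    };
    // Returns the Object.Location of the updated vector, as in the other Update RPCs.
    let loc = client.update_timestamp(req).await?.into_inner();
    println!("updated {} on {:?}", loc.uuid, loc.ips);
    Ok(())
}

On the server side, the generated Update trait now requires an update_timestamp handler, so existing implementations must add one before they build against these stubs.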