From 906dbcb78eddbb1250b935f73105aa9eb3de4e85 Mon Sep 17 00:00:00 2001 From: Tim Gross Date: Wed, 27 Mar 2024 15:51:34 +0000 Subject: [PATCH] backport of commit c3e7b13d54eaf868974a0a31a721d04ff97c024b --- .changelog/18607.txt | 3 - .changelog/19101.txt | 3 - .changelog/19496.txt | 3 - .changelog/19544.txt | 6 - .changelog/19839.txt | 3 - .changelog/19985.txt | 3 - .changelog/19989.txt | 3 - .changelog/20029.txt | 3 - .changelog/20047.txt | 3 - .changelog/20126.txt | 3 - .changelog/20156.txt | 3 - .changelog/20173.txt | 3 - .changelog/20218.txt | 3 - .github/workflows/actionlint.yml | 2 +- .github/workflows/backport.yml | 2 +- .github/workflows/build.yml | 14 +- .github/workflows/checks.yaml | 2 +- .github/workflows/copywrite.yml | 2 +- .github/workflows/ember-test-audit.yml | 4 +- .github/workflows/release.yml | 2 +- .github/workflows/semgrep.yml | 2 +- .github/workflows/test-core.yaml | 10 +- .github/workflows/test-e2e.yml | 4 +- .github/workflows/test-ui.yml | 6 +- .github/workflows/test-windows.yml | 2 +- CHANGELOG-unsupported.md | 420 --- CHANGELOG.md | 603 ++-- GNUmakefile | 2 +- api/go.mod | 2 +- api/go.sum | 4 +- api/tasks.go | 121 +- api/tasks_test.go | 1 - client/agent_endpoint.go | 2 +- client/agent_endpoint_test.go | 2 +- client/alloc_endpoint.go | 5 +- client/alloc_endpoint_test.go | 2 +- client/allocdir/alloc_dir.go | 189 +- client/allocdir/alloc_dir_test.go | 234 +- client/allocdir/fs_darwin.go | 2 +- client/allocdir/fs_default.go | 13 - client/allocdir/fs_freebsd.go | 2 +- client/allocdir/fs_linux.go | 22 +- client/allocdir/fs_netbsd.go | 2 +- client/allocdir/fs_solaris.go | 2 +- client/allocdir/fs_unix.go | 10 +- client/allocdir/fs_windows.go | 2 +- client/allocdir/task_dir.go | 186 +- client/allocdir/task_dir_linux.go | 12 +- client/allocdir/task_dir_test.go | 109 +- client/allocdir/testing.go | 2 +- client/allocrunner/alloc_runner.go | 20 +- client/allocrunner/alloc_runner_hooks.go | 8 +- client/allocrunner/alloc_runner_test.go | 20 +- client/allocrunner/allocdir_hook.go | 8 +- client/allocrunner/consul_grpc_sock_hook.go | 27 +- client/allocrunner/consul_hook.go | 4 +- client/allocrunner/consul_http_sock_hook.go | 23 +- client/allocrunner/interfaces/runner.go | 2 +- client/allocrunner/migrate_hook.go | 8 +- .../taskrunner/connect_native_hook_test.go | 11 +- .../taskrunner/dispatch_hook_test.go | 13 +- .../taskrunner/dynamic_users_hook.go | 124 - .../taskrunner/dynamic_users_hook_test.go | 203 -- .../taskrunner/envoy_bootstrap_hook_test.go | 13 +- .../taskrunner/envoy_version_hook_test.go | 13 +- .../allocrunner/taskrunner/getter/params.go | 7 +- .../taskrunner/getter/params_test.go | 1 - .../allocrunner/taskrunner/getter/sandbox.go | 2 - .../taskrunner/getter/sandbox_test.go | 31 - client/allocrunner/taskrunner/getter/util.go | 4 - .../allocrunner/taskrunner/task_dir_hook.go | 16 +- client/allocrunner/taskrunner/task_runner.go | 10 +- .../taskrunner/task_runner_hooks.go | 1 - .../taskrunner/task_runner_linux_test.go | 3 +- .../taskrunner/task_runner_test.go | 2 +- .../taskrunner/template/template_test.go | 4 +- client/allocrunner/taskrunner/volume_hook.go | 2 - client/allocwatcher/alloc_watcher.go | 12 +- client/allocwatcher/alloc_watcher_test.go | 6 +- client/client.go | 29 +- client/client_interface_test.go | 2 +- client/client_test.go | 29 - client/config/arconfig.go | 6 +- client/config/config.go | 16 - client/config/config_test.go | 123 +- client/config/users.go | 32 - client/config/users_test.go | 59 - client/fs_endpoint.go | 2 +- client/fs_endpoint_test.go | 4 +- 
client/heartbeatstop.go | 12 +- client/heartbeatstop_test.go | 67 +- client/logmon/logging/rotator_test.go | 64 +- client/logmon/logmon_test.go | 99 +- client/rpc.go | 2 +- client/state/upgrade.go | 2 +- client/structs/structs.go | 2 +- client/testutil/rpc.go | 2 +- command/acl_binding_rule_update_test.go | 3 +- command/acl_bootstrap_test.go | 5 +- command/acl_role_create_test.go | 26 +- command/acl_role_delete_test.go | 20 +- command/acl_role_info_test.go | 36 +- command/acl_role_list_test.go | 24 +- command/acl_role_test.go | 8 +- command/acl_role_update_test.go | 48 +- command/acl_token_create_test.go | 25 +- command/agent/agent.go | 6 - command/agent/agent_endpoint.go | 2 +- command/agent/agent_endpoint_test.go | 2 +- command/agent/alloc_endpoint.go | 2 +- command/agent/alloc_endpoint_test.go | 2 +- command/agent/command.go | 18 +- command/agent/command_test.go | 2 +- command/agent/config.go | 12 - command/agent/config_parse_test.go | 11 +- command/agent/consul/int_test.go | 2 +- command/agent/event_endpoint.go | 2 +- command/agent/fs_endpoint.go | 2 +- command/agent/helpers_test.go | 54 +- command/agent/host/host_test.go | 32 +- command/agent/http.go | 2 +- command/agent/http_test.go | 2 +- command/agent/job_endpoint.go | 27 +- command/agent/job_endpoint_test.go | 8 - command/agent/operator_endpoint.go | 2 +- command/agent/testdata/basic.hcl | 11 +- command/agent/testdata/basic.json | 1 - command/commands.go | 5 - command/data_format.go | 2 +- command/deployment_fail_test.go | 9 +- command/deployment_pause_test.go | 9 +- command/deployment_promote_test.go | 9 +- command/deployment_resume_test.go | 9 +- command/deployment_status_test.go | 26 +- command/deployment_unblock_test.go | 9 +- command/eval_delete_test.go | 54 +- command/eval_list_test.go | 6 +- command/eval_status_test.go | 9 +- command/event_test.go | 4 +- command/fmt_test.go | 12 +- command/helper_devices_test.go | 25 +- command/helpers_test.go | 58 +- command/integration_test.go | 15 +- command/job_allocs_test.go | 53 +- command/job_deployments_test.go | 19 +- command/job_dispatch_test.go | 11 +- command/job_eval_test.go | 22 +- command/job_history_test.go | 8 +- command/job_init_test.go | 13 +- command/job_inspect_test.go | 8 +- command/job_periodic_force_test.go | 61 +- command/job_plan_test.go | 24 +- command/job_promote_test.go | 13 +- command/job_restart_test.go | 1 + command/job_revert_test.go | 8 +- command/job_run_test.go | 32 +- command/job_status_test.go | 35 +- command/job_validate_test.go | 24 +- command/license_get_test.go | 14 +- command/meta_test.go | 5 +- command/monitor_test.go | 3 +- command/namespace_apply_test.go | 5 +- command/namespace_delete_test.go | 15 +- command/namespace_inspect_test.go | 26 +- command/namespace_status_test.go | 5 +- command/node_drain_test.go | 40 +- command/node_eligibility_test.go | 7 +- command/node_pool_jobs_test.go | 7 +- command/node_status_test.go | 18 +- command/operator_api_test.go | 27 +- command/operator_autopilot_health.go | 188 -- command/operator_autopilot_health_test.go | 33 - command/operator_autopilot_set_test.go | 31 +- command/operator_debug_test.go | 1 - command/operator_gossip_keyring_test.go | 13 +- command/operator_raft_remove_test.go | 29 +- command/operator_scheduler_get_config_test.go | 20 +- command/operator_scheduler_set_config_test.go | 38 +- command/operator_snapshot_inspect_test.go | 18 +- command/operator_snapshot_restore_test.go | 26 +- command/operator_snapshot_save_test.go | 20 +- command/plugin_status_test.go | 16 +- 
command/quota_delete_test.go | 15 +- command/quota_init_test.go | 58 +- command/quota_list_test.go | 5 +- command/recommendation_apply_test.go | 26 +- command/recommendation_dismiss_test.go | 43 +- command/recommendation_info_test.go | 35 +- command/recommendation_list_test.go | 43 +- command/scaling_policy_list_test.go | 24 +- command/scaling_policy_test.go | 4 +- command/service_delete_test.go | 27 +- command/service_info_test.go | 45 +- command/service_list_test.go | 27 +- command/setup_vault_test.go | 5 +- command/status_test.go | 82 +- command/testdata/example-short-bad.json | 4 - command/testdata/example-short.json | 1 - command/tls_ca_create_test.go | 50 +- command/tls_cert_create_test.go | 48 +- command/ui_test.go | 9 +- command/var_get_test.go | 51 +- command/var_init_test.go | 70 +- command/var_list_test.go | 56 +- command/var_lock_test.go | 5 +- command/var_purge_test.go | 51 +- command/var_put_test.go | 41 +- command/volume_register_test.go | 19 +- command/volume_status_test.go | 12 +- contributing/architecture-eval-triggers.md | 4 +- drivers/docker/config.go | 3 +- drivers/docker/cpuset_test.go | 2 - drivers/exec/driver.go | 3 +- drivers/java/driver.go | 7 +- drivers/mock/driver.go | 3 +- drivers/mock/driver_test.go | 5 +- drivers/qemu/driver.go | 3 +- drivers/rawexec/driver.go | 39 +- drivers/rawexec/driver_test.go | 12 +- drivers/rawexec/driver_unix_test.go | 2 +- drivers/shared/executor/executor.go | 5 + .../shared/executor/executor_linux_test.go | 5 +- drivers/shared/executor/executor_test.go | 9 +- drivers/shared/executor/grpc_client.go | 33 +- drivers/shared/executor/grpc_server.go | 33 +- drivers/shared/executor/proto/executor.pb.go | 141 +- drivers/shared/executor/proto/executor.proto | 2 +- e2e/artifact/input/artifact_limits.nomad | 2 +- e2e/artifact/input/artifact_linux.nomad | 12 +- e2e/connect/client.go | 2 - e2e/consul/input/consul_wi.nomad.hcl | 62 - e2e/consul/namespaces.go | 5 +- e2e/csi/ebs.go | 10 +- e2e/e2eutil/consul.go | 6 +- e2e/e2eutil/utils.go | 8 +- e2e/isolation/input/chroot_docker.nomad | 2 +- e2e/isolation/pids_test.go | 15 +- e2e/metrics/metrics_test.go | 4 +- e2e/oversubscription/input/rawexec.hcl | 38 - e2e/oversubscription/oversubscription_test.go | 44 +- .../input/oversubmax.hcl} | 0 e2e/rawexec/rawexec_test.go | 21 + e2e/terraform/.terraform.lock.hcl | 37 +- e2e/terraform/README.md | 6 - e2e/terraform/ecs.tf | 12 +- e2e/terraform/ecs.tftpl | 2 - e2e/terraform/nomad-acls.tf | 45 +- e2e/terraform/outputs.tf | 2 +- e2e/terraform/volumes.tf | 28 +- e2e/terraform/volumes.tftpl | 15 - e2e/v3/cluster3/cluster3.go | 121 - e2e/v3/jobs3/jobs3.go | 109 - e2e/vaultsecrets/vaultsecrets_test.go | 8 +- go.mod | 35 +- go.sum | 86 +- helper/boltdd/boltdd.go | 2 +- helper/boltdd/boltdd_test.go | 2 +- helper/pluginutils/hclutils/testing.go | 2 +- helper/pluginutils/hclutils/types.go | 2 +- helper/pluginutils/hclutils/util.go | 2 +- helper/pool/pool.go | 2 +- helper/raftutil/msgpack.go | 2 +- helper/raftutil/msgpack_test.go | 2 +- helper/raftutil/state.go | 3 +- helper/snapshot/snapshot_test.go | 2 +- helper/subproc/self.go | 33 +- helper/subproc/self_test.go | 15 - helper/subproc/subproc.go | 9 +- helper/users/dynamic/pool.go | 176 -- helper/users/dynamic/pool_test.go | 119 - helper/users/dynamic/users.go | 61 - helper/users/lookup.go | 44 +- helper/users/lookup_linux_test.go | 16 - jobspec/parse.go | 36 - jobspec/parse_group.go | 9 - jobspec/parse_test.go | 42 +- jobspec/test-fixtures/basic.hcl | 7 - nomad/acl_endpoint_test.go | 2 +- nomad/acl_test.go | 2 +- 
nomad/alloc_endpoint_test.go | 2 +- nomad/client_agent_endpoint.go | 2 +- nomad/client_agent_endpoint_test.go | 2 +- nomad/client_alloc_endpoint.go | 2 +- nomad/client_alloc_endpoint_test.go | 4 +- nomad/client_csi_endpoint_test.go | 2 +- nomad/client_fs_endpoint.go | 2 +- nomad/client_fs_endpoint_test.go | 4 +- nomad/client_rpc.go | 4 +- nomad/client_stats_endpoint_test.go | 2 +- nomad/config.go | 3 - nomad/core_sched_test.go | 2 +- nomad/csi_endpoint_test.go | 2 +- nomad/deployment_endpoint_test.go | 2 +- nomad/drainer_int_test.go | 2 +- nomad/encrypter_test.go | 2 +- nomad/eval_broker_test.go | 2 +- nomad/eval_endpoint_test.go | 2 +- nomad/event_endpoint.go | 2 +- nomad/event_endpoint_test.go | 4 +- nomad/fsm.go | 2 +- nomad/fsm_registry_ce.go | 2 +- nomad/heartbeat_test.go | 76 +- nomad/job_endpoint_ce_test.go | 2 +- nomad/job_endpoint_test.go | 2 +- nomad/keyring_endpoint_test.go | 2 +- nomad/namespace_endpoint_test.go | 2 +- nomad/node_endpoint_test.go | 368 ++- nomad/node_pool_endpoint_test.go | 2 +- nomad/operator_endpoint.go | 2 +- nomad/operator_endpoint_test.go | 204 +- nomad/periodic_endpoint_test.go | 2 +- nomad/plan_apply.go | 2 +- nomad/plan_endpoint_test.go | 2 +- nomad/plan_normalization_test.go | 2 +- nomad/regions_endpoint_test.go | 2 +- nomad/rpc.go | 2 +- nomad/rpc_test.go | 4 +- nomad/scaling_endpoint_test.go | 2 +- nomad/search_endpoint_test.go | 2 +- nomad/server.go | 16 +- nomad/server_test.go | 2 +- nomad/service_registration_endpoint_test.go | 2 +- nomad/status_endpoint_test.go | 2 +- nomad/stream/ndjson.go | 2 +- nomad/structs/alloc_test.go | 503 +--- nomad/structs/config/artifact_test.go | 3 +- nomad/structs/config/users.go | 100 - nomad/structs/config/users_test.go | 139 - nomad/structs/diff.go | 29 - nomad/structs/diff_test.go | 216 -- nomad/structs/encoding.go | 2 +- nomad/structs/generate.sh | 2 +- nomad/structs/group.go | 139 - nomad/structs/group_test.go | 275 -- nomad/structs/handlers.go | 2 +- nomad/structs/structs.go | 258 +- nomad/structs/structs_test.go | 470 ++- nomad/structs/volume_test.go | 61 - nomad/structs/volumes.go | 64 +- nomad/system_endpoint_test.go | 2 +- nomad/timetable.go | 2 +- nomad/timetable_test.go | 2 +- nomad/variables_endpoint_test.go | 2 +- plugins/base/plugin.go | 2 +- plugins/base/plugin_test.go | 54 +- plugins/csi/client_test.go | 101 +- plugins/drivers/client.go | 12 +- plugins/drivers/driver.go | 31 +- plugins/drivers/fsisolation/isolation.go | 25 - plugins/drivers/proto/driver.pb.go | 516 ++-- plugins/drivers/proto/driver.proto | 7 - plugins/drivers/server.go | 10 +- plugins/drivers/testutils/exec_testing.go | 54 +- plugins/drivers/testutils/testing.go | 34 +- plugins/drivers/testutils/testing_test.go | 61 +- plugins/drivers/utils.go | 2 - plugins/drivers/utils_test.go | 11 +- plugins/shared/structs/attribute_test.go | 10 +- scheduler/feasible_test.go | 42 +- scheduler/generic_sched_test.go | 293 +- scheduler/reconcile.go | 57 +- scheduler/reconcile_test.go | 423 ++- scheduler/reconcile_util.go | 12 +- scheduler/reconcile_util_test.go | 2588 ++++++++--------- .../reconnecting_picker.go | 147 - .../reconnecting_picker_test.go | 477 --- scheduler/stack_test.go | 199 +- scheduler/util.go | 32 - scheduler/util_test.go | 36 - testutil/tls.go | 23 +- testutil/vault.go | 3 +- testutil/wait_test.go | 9 +- ui/app/adapters/variable.js | 10 - ui/app/components/action-card.hbs | 1 - ui/app/components/client-node-row.js | 30 +- ui/app/components/exec-terminal.js | 38 - ui/app/components/variable-form.hbs | 113 +- 
ui/app/components/variable-form.js | 26 +- .../components/variable-form/input-group.hbs | 33 +- .../variable-form/namespace-filter.hbs | 25 +- .../variable-form/related-entities.hbs | 20 +- ui/app/components/variable-paths.hbs | 60 +- ui/app/controllers/clients/index.js | 175 +- ui/app/controllers/evaluations/index.js | 3 +- ui/app/controllers/exec.js | 12 +- ui/app/controllers/jobs/run/templates/new.js | 17 +- .../controllers/variables/variable/index.js | 4 - ui/app/styles/components/actions.scss | 13 - ui/app/styles/components/variables.scss | 94 +- ui/app/styles/core/table.scss | 6 - .../allocations/allocation/task/index.hbs | 2 +- ui/app/templates/clients/index.hbs | 220 +- .../templates/components/client-node-row.hbs | 40 +- ui/app/templates/evaluations/index.hbs | 2 +- ui/app/templates/exec.hbs | 2 +- ui/app/templates/jobs/run/templates/new.hbs | 5 - ui/app/templates/variables/index.hbs | 76 +- ui/app/templates/variables/new.hbs | 27 +- ui/app/templates/variables/path.hbs | 62 +- ui/app/templates/variables/variable/edit.hbs | 43 +- ui/app/templates/variables/variable/index.hbs | 131 +- ui/mirage/factories/job.js | 5 + ui/package.json | 6 +- ui/tests/acceptance/actions-test.js | 27 +- ui/tests/acceptance/clients-list-test.js | 104 +- ui/tests/acceptance/exec-test.js | 44 +- ui/tests/acceptance/job-status-panel-test.js | 3 - ui/tests/acceptance/variables-test.js | 42 +- ui/tests/helpers/helios.js | 38 - .../components/variable-form-test.js | 154 +- ui/tests/pages/clients/list.js | 29 +- ui/yarn.lock | 16 +- website/.husky/pre-commit | 3 - website/.nvmrc | 2 +- .../commands/operator/autopilot/health.mdx | 45 - website/content/docs/concepts/security.mdx | 17 +- website/content/docs/configuration/client.mdx | 6 +- website/content/docs/configuration/server.mdx | 4 +- .../docs/job-specification/disconnect.mdx | 201 -- .../docs/job-specification/ephemeral_disk.mdx | 15 + .../content/docs/job-specification/group.mdx | 85 +- .../docs/job-specification/resources.mdx | 5 +- .../docs/job-specification/volume_mount.mdx | 8 - .../content/docs/upgrade/upgrade-specific.mdx | 14 - .../content/plugins/drivers/community/rkt.mdx | 6 +- website/content/tools/index.mdx | 1 + website/data/docs-nav-data.json | 8 - website/package-lock.json | 910 +++--- website/package.json | 20 +- website/redirects.js | 30 +- 430 files changed, 6751 insertions(+), 11972 deletions(-) delete mode 100644 .changelog/18607.txt delete mode 100644 .changelog/19101.txt delete mode 100644 .changelog/19496.txt delete mode 100644 .changelog/19544.txt delete mode 100644 .changelog/19839.txt delete mode 100644 .changelog/19985.txt delete mode 100644 .changelog/19989.txt delete mode 100644 .changelog/20029.txt delete mode 100644 .changelog/20047.txt delete mode 100644 .changelog/20126.txt delete mode 100644 .changelog/20156.txt delete mode 100644 .changelog/20173.txt delete mode 100644 .changelog/20218.txt delete mode 100644 client/allocdir/fs_default.go delete mode 100644 client/allocrunner/taskrunner/dynamic_users_hook.go delete mode 100644 client/allocrunner/taskrunner/dynamic_users_hook_test.go delete mode 100644 client/config/users.go delete mode 100644 client/config/users_test.go delete mode 100644 command/operator_autopilot_health.go delete mode 100644 command/operator_autopilot_health_test.go delete mode 100644 e2e/consul/input/consul_wi.nomad.hcl delete mode 100644 e2e/oversubscription/input/rawexec.hcl rename e2e/{oversubscription/input/rawexecmax.hcl => rawexec/input/oversubmax.hcl} (100%) delete mode 100644 
e2e/terraform/ecs.tftpl delete mode 100644 e2e/terraform/volumes.tftpl delete mode 100644 helper/subproc/self_test.go delete mode 100644 helper/users/dynamic/pool.go delete mode 100644 helper/users/dynamic/pool_test.go delete mode 100644 helper/users/dynamic/users.go delete mode 100644 nomad/structs/config/users.go delete mode 100644 nomad/structs/config/users_test.go delete mode 100644 nomad/structs/group.go delete mode 100644 nomad/structs/group_test.go delete mode 100644 plugins/drivers/fsisolation/isolation.go delete mode 100644 scheduler/reconnecting_picker/reconnecting_picker.go delete mode 100644 scheduler/reconnecting_picker/reconnecting_picker_test.go delete mode 100644 ui/tests/helpers/helios.js delete mode 100644 website/.husky/pre-commit delete mode 100644 website/content/docs/commands/operator/autopilot/health.mdx delete mode 100644 website/content/docs/job-specification/disconnect.mdx diff --git a/.changelog/18607.txt b/.changelog/18607.txt deleted file mode 100644 index 90733578bae6..000000000000 --- a/.changelog/18607.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: change the State filter on clients page to split out eligibility and drain status -``` diff --git a/.changelog/19101.txt b/.changelog/19101.txt deleted file mode 100644 index d5fdf1c7a5fc..000000000000 --- a/.changelog/19101.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -scheduler: Added a new configuration to avoid rescheduling allocations if a node misses one or more heartbeats -``` diff --git a/.changelog/19496.txt b/.changelog/19496.txt deleted file mode 100644 index f398000f735c..000000000000 --- a/.changelog/19496.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Added a copy button on Action output -``` diff --git a/.changelog/19544.txt b/.changelog/19544.txt deleted file mode 100644 index cd2dbb62f8e9..000000000000 --- a/.changelog/19544.txt +++ /dev/null @@ -1,6 +0,0 @@ -```release-note:improvement -ui: Replaced single-line variable value fields with multi-line textarea blocks -``` -```release-note:improvement -ui: Updated the style of components in the Variables web ui -``` diff --git a/.changelog/19839.txt b/.changelog/19839.txt deleted file mode 100644 index 981a140904f7..000000000000 --- a/.changelog/19839.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -client/volumes: Add a mount volume level option for selinux tags on volumes -``` diff --git a/.changelog/19985.txt b/.changelog/19985.txt deleted file mode 100644 index 08c3464316a8..000000000000 --- a/.changelog/19985.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Prompt a user before they close an exec window to prevent accidental close-browser-tab shortcuts that overlap with terminal ones -``` diff --git a/.changelog/19989.txt b/.changelog/19989.txt deleted file mode 100644 index fcdf612eb27f..000000000000 --- a/.changelog/19989.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -ui: Improve error and warning messages for invalid variable and job template paths/names -``` diff --git a/.changelog/20029.txt b/.changelog/20029.txt deleted file mode 100644 index 7d083dfcc5d1..000000000000 --- a/.changelog/20029.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -server: Add new options for reconciliation in case of disconnected nodes -``` diff --git a/.changelog/20047.txt b/.changelog/20047.txt deleted file mode 100644 index 6bc017277351..000000000000 --- a/.changelog/20047.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Fixed
an issue where keynav would not trigger evaluation sidebar expand -``` diff --git a/.changelog/20126.txt b/.changelog/20126.txt deleted file mode 100644 index 08efed5a00ba..000000000000 --- a/.changelog/20126.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -artifact: Added support for downloading artifacts without validating the TLS certificate -``` diff --git a/.changelog/20156.txt b/.changelog/20156.txt deleted file mode 100644 index c7d0b820d4cb..000000000000 --- a/.changelog/20156.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -autopilot: Added `operator autopilot health` command to review Autopilot health data -``` diff --git a/.changelog/20173.txt b/.changelog/20173.txt deleted file mode 100644 index 45fe6c2e8aec..000000000000 --- a/.changelog/20173.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -deps: Update msgpack to v2 -``` diff --git a/.changelog/20218.txt b/.changelog/20218.txt deleted file mode 100644 index e0e21bfadb04..000000000000 --- a/.changelog/20218.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:bug -ui: Show the namespace in the web UI exec command hint -``` diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index daac2a9e1818..5feae0eb2688 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -10,6 +10,6 @@ jobs: actionlint: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: "Check workflow files" uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint:latest diff --git a/.github/workflows/backport.yml b/.github/workflows/backport.yml index eca242ffc9e3..419b25196e07 100644 --- a/.github/workflows/backport.yml +++ b/.github/workflows/backport.yml @@ -38,7 +38,7 @@ jobs: if: always() && needs.backport.result == 'failure' runs-on: ${{ endsWith(github.repository, '-enterprise') && fromJSON('["self-hosted", "ondemand", "linux"]') || 'ubuntu-latest' }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/vault-secrets with: paths: |- diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8299ce1f41b1..1b78814933a6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,7 +28,7 @@ jobs: outputs: go-version: ${{ steps.get-go-version.outputs.go-version }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} - name: Determine Go version @@ -43,7 +43,7 @@ jobs: outputs: product-version: ${{ steps.get-product-version.outputs.product-version }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} - name: get product version @@ -58,7 +58,7 @@ jobs: filepath: ${{ steps.generate-metadata-file.outputs.filepath }} steps: - name: "Checkout directory" - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} - name: Generate metadata file @@ -86,7 +86,7 @@ jobs: name: Go ${{ 
needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} - name: Setup go @@ -138,7 +138,7 @@ jobs: name: Go ${{ needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} - name: Setup go @@ -243,7 +243,7 @@ jobs: name: Go ${{ needs.get-go-version.outputs.go-version }} ${{ matrix.goos }} ${{ matrix.goarch }} build steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.inputs.build-ref }} @@ -305,7 +305,7 @@ jobs: version: ${{needs.get-product-version.outputs.product-version}} revision: ${{github.sha}} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Set revision if: "${{ github.event.inputs.build-ref != '' }}" run: | diff --git a/.github/workflows/checks.yaml b/.github/workflows/checks.yaml index 1e652c82ca95..24d4f3de4ebc 100644 --- a/.github/workflows/checks.yaml +++ b/.github/workflows/checks.yaml @@ -24,7 +24,7 @@ jobs: runs-on: ${{ endsWith(github.repository, '-enterprise') && fromJSON('["self-hosted", "ondemand", "linux", "disk_gb=255"]') || 'ubuntu-22.04' }} timeout-minutes: 15 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: fetch-depth: 0 # needs tags for checkproto - uses: ./.github/actions/vault-secrets diff --git a/.github/workflows/copywrite.yml b/.github/workflows/copywrite.yml index e0ea1480b820..21e737b43a32 100644 --- a/.github/workflows/copywrite.yml +++ b/.github/workflows/copywrite.yml @@ -7,7 +7,7 @@ jobs: copywrite: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: hashicorp/setup-copywrite@867a1a2a064a0626db322392806428f7dc59cb3e # v1.1.2 name: Setup Copywrite with: diff --git a/.github/workflows/ember-test-audit.yml b/.github/workflows/ember-test-audit.yml index c54bd24ea201..b71d5fcfc8c8 100644 --- a/.github/workflows/ember-test-audit.yml +++ b/.github/workflows/ember-test-audit.yml @@ -15,7 +15,7 @@ jobs: time-base: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 with: ref: ${{ github.event.pull_request.base.sha }} - uses: nanasess/setup-chromedriver@69cc01d772a1595b8aee87d52f53e71b3904d9d0 # v2.1.2 @@ -34,7 +34,7 @@ jobs: time-pr: runs-on: ubuntu-latest steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: nanasess/setup-chromedriver@69cc01d772a1595b8aee87d52f53e71b3904d9d0 # v2.1.2 - name: Use Node.js uses: actions/setup-node@e33196f7422957bea03ed53f6fbb155025ffc7b8 # v3.7.0 diff --git a/.github/workflows/release.yml 
b/.github/workflows/release.yml index 08257d85a7af..1f84d67c0bd7 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -52,7 +52,7 @@ jobs: echo "::error::Version ${{ github.event.inputs.version }} is invalid" exit 1 fi - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/vault-secrets with: paths: |- diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml index 13598e20f114..da70b73c9f45 100644 --- a/.github/workflows/semgrep.yml +++ b/.github/workflows/semgrep.yml @@ -16,7 +16,7 @@ jobs: # Skip any PR created by dependabot to avoid permission issues if: (github.actor != 'dependabot[bot]') steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - run: semgrep ci --config=.semgrep/ permissions: contents: read diff --git a/.github/workflows/test-core.yaml b/.github/workflows/test-core.yaml index 6ae4f41878f6..ddd3654b1aa0 100644 --- a/.github/workflows/test-core.yaml +++ b/.github/workflows/test-core.yaml @@ -52,7 +52,7 @@ jobs: runs-on: ${{ endsWith(github.repository, '-enterprise') && fromJSON('["self-hosted", "ondemand", "linux"]') || 'ubuntu-22.04' }} timeout-minutes: 10 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/vault-secrets with: paths: |- @@ -73,11 +73,11 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-22.04, macos-14, windows-2019] + os: [ubuntu-22.04, macos-11, windows-2019] runs-on: ${{matrix.os}} timeout-minutes: 20 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: hashicorp/setup-golang@v3 - name: Run make dev run: | @@ -88,7 +88,7 @@ jobs: runs-on: [custom, xl, 22.04] timeout-minutes: 8 steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: hashicorp/setup-golang@v3 - name: Run API tests env: @@ -112,7 +112,7 @@ jobs: - drivers - quick steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: hashicorp/setup-golang@v3 - name: Run Matrix Tests env: diff --git a/.github/workflows/test-e2e.yml b/.github/workflows/test-e2e.yml index f83eaae452f6..ecf2167ff54b 100644 --- a/.github/workflows/test-e2e.yml +++ b/.github/workflows/test-e2e.yml @@ -38,7 +38,7 @@ jobs: test-e2e-vault: runs-on: ${{ endsWith(github.repository, '-enterprise') && fromJSON('["self-hosted", "ondemand", "linux"]') || 'ubuntu-latest' }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/vault-secrets with: paths: |- @@ -55,7 +55,7 @@ jobs: test-e2e-consul: runs-on: 'ubuntu-22.04' # this job requires sudo, so not currently suitable for self-hosted runners steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Git config token if: endsWith(github.repository, '-enterprise') run: git config --global 
url.'https://${{ secrets.ELEVATED_GITHUB_TOKEN }}@github.com'.insteadOf 'https://github.com' diff --git a/.github/workflows/test-ui.yml b/.github/workflows/test-ui.yml index b7d1db69c8da..4dcdda8b1472 100644 --- a/.github/workflows/test-ui.yml +++ b/.github/workflows/test-ui.yml @@ -21,7 +21,7 @@ jobs: outputs: nonce: ${{ steps.nonce.outputs.nonce }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/setup-js - name: lint:js run: yarn run lint:js @@ -45,7 +45,7 @@ jobs: partition: [1, 2, 3, 4] split: [4] steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/setup-js - uses: browser-actions/setup-chrome@c485fa3bab6be59dce18dbc18ef6ab7cbc8ff5f1 # v1.2.0 - uses: ./.github/actions/vault-secrets @@ -68,7 +68,7 @@ jobs: run: working-directory: ui steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - uses: ./.github/actions/setup-js - uses: ./.github/actions/vault-secrets with: diff --git a/.github/workflows/test-windows.yml b/.github/workflows/test-windows.yml index cc293ca03d24..902ff2a66214 100644 --- a/.github/workflows/test-windows.yml +++ b/.github/workflows/test-windows.yml @@ -50,7 +50,7 @@ jobs: - name: Docker Info run: docker version - run: git config --global core.autocrlf false - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@c85c95e3d7251135ab7dc9ce3241c5835cc595a9 # v3.5.3 - name: Setup go uses: actions/setup-go@fac708d6674e30b6ba41289acaab6d4b75aa0753 # v4.0.1 with: diff --git a/CHANGELOG-unsupported.md b/CHANGELOG-unsupported.md index ab0515d7957a..188871ad3280 100644 --- a/CHANGELOG-unsupported.md +++ b/CHANGELOG-unsupported.md @@ -2,426 +2,6 @@ The versions of Nomad listed here are no longer supported by HashiCorp. 
-## 1.4.14 (October 30, 2023) - -SECURITY: - -* build: Update to Go 1.21.3 [[GH-18717](https://github.com/hashicorp/nomad/issues/18717)] - -BUG FIXES: - -* build: Add `timetzdata` Go build tag on Windows binaries to embed time zone data so periodic jobs are able to specify a time zone value on Windows environments [[GH-18676](https://github.com/hashicorp/nomad/issues/18676)] -* cli: Fixed an unexpected behavior of the `nomad acl token update` command that could cause a management token to be downgraded to client on update [[GH-18689](https://github.com/hashicorp/nomad/issues/18689)] -* client: prevent tasks from starting without the prestart hooks running [[GH-18662](https://github.com/hashicorp/nomad/issues/18662)] -* csi: check controller plugin health early during volume register/create [[GH-18570](https://github.com/hashicorp/nomad/issues/18570)] -* metrics: Fixed a bug where CPU counters could report errors for negative values [[GH-18835](https://github.com/hashicorp/nomad/issues/18835)] -* scaling: Unblock blocking queries to /v1/job/{job-id}/scale if the job goes away [[GH-18637](https://github.com/hashicorp/nomad/issues/18637)] -* scheduler (Enterprise): auto-unblock evals with associated quotas when node resources are freed up [[GH-18838](https://github.com/hashicorp/nomad/issues/18838)] -* scheduler: Ensure duplicate allocation indexes are tracked and fixed when performing job updates [[GH-18873](https://github.com/hashicorp/nomad/issues/18873)] -* services: use interpolated address when performing nomad service health checks [[GH-18584](https://github.com/hashicorp/nomad/issues/18584)] - -## 1.4.13 (September 13, 2023) - -IMPROVEMENTS: - -* build: Update to Go 1.21.0 [[GH-18184](https://github.com/hashicorp/nomad/issues/18184)] -* raft: remove use of deprecated Leader func [[GH-18352](https://github.com/hashicorp/nomad/issues/18352)] - -BUG FIXES: - -* acl: Fixed a bug where ACL tokens linked to ACL roles containing duplicate policies would cause erroneous permission denied responses [[GH-18419](https://github.com/hashicorp/nomad/issues/18419)] -* cli: Add missing help message for the `-consul-namespace` flag in the `nomad job run` command [[GH-18081](https://github.com/hashicorp/nomad/issues/18081)] -* cli: Fixed a bug that prevented CSI volumes in namespaces other than `default` from being displayed in the `nomad node status -verbose` output [[GH-17925](https://github.com/hashicorp/nomad/issues/17925)] -* cli: Snapshot name is required in `volume snapshot create` command [[GH-17958](https://github.com/hashicorp/nomad/issues/17958)] -* client: Fixed a bug where the state of poststop tasks could be corrupted by client gc [[GH-17971](https://github.com/hashicorp/nomad/issues/17971)] -* client: Ignore stale server updates to prevent GCing allocations that should be running [[GH-18269](https://github.com/hashicorp/nomad/issues/18269)] -* client: return 404 instead of 500 when trying to access logs and files from allocations that have been garbage collected [[GH-18232](https://github.com/hashicorp/nomad/issues/18232)] -* core: Fixed a bug where exponential backoff could result in excessive CPU usage [[GH-18200](https://github.com/hashicorp/nomad/issues/18200)] -* csi: fixed a bug that could cause a panic when deleting volumes [[GH-18234](https://github.com/hashicorp/nomad/issues/18234)] -* fingerprint: fix 'default' alias not being added to interface specified by network_interface [[GH-18096](https://github.com/hashicorp/nomad/issues/18096)] -* jobspec: Add diff for Task Group
scaling block [[GH-18332](https://github.com/hashicorp/nomad/issues/18332)] -* migration: Fixed a bug where previous alloc logs were destroyed when migrating ephemeral_disk on the same client [[GH-18108](https://github.com/hashicorp/nomad/issues/18108)] -* scheduler: Fixed a bug where device IDs were not correctly filtered in constraints [[GH-18141](https://github.com/hashicorp/nomad/issues/18141)] -* services: Add validation message when `tls_skip_verify` is set to `true` on a Nomad service [[GH-18333](https://github.com/hashicorp/nomad/issues/18333)] - -## 1.4.12 (July 21, 2023) - -BUG FIXES: - -* csi: Fixed a bug in sending concurrent requests to CSI controller plugins by serializing them per plugin [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] -* csi: Fixed a bug where CSI controller requests could be sent to unhealthy plugins [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] -* csi: Fixed a bug where CSI controller requests could not be sent to controllers on nodes ineligible for scheduling [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] -* services: Fixed a bug that prevented passing query parameters in Nomad native service discovery HTTP health check paths [[GH-17936](https://github.com/hashicorp/nomad/issues/17936)] -* ui: Fixed a bug that prevented nodes from being filtered by the "Ineligible" and "Draining" state filters [[GH-17940](https://github.com/hashicorp/nomad/issues/17940)] -* ui: Fixed error handling for cross-region requests when the receiving region does not implement the endpoint being requested [[GH-18020](https://github.com/hashicorp/nomad/issues/18020)] - -## 1.4.11 (July 18, 2023) - -SECURITY: - -* acl: Fixed a bug where a namespace ACL policy without label was applied to an unexpected namespace. [CVE-2023-3072](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3072) [[GH-17908](https://github.com/hashicorp/nomad/issues/17908)] -* search: Fixed a bug where ACL did not filter plugin and variable names in search endpoint. 
[CVE-2023-3300](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3300) [[GH-17906](https://github.com/hashicorp/nomad/issues/17906)] -* sentinel (Enterprise): Fixed a bug where ACL tokens could be exfiltrated via Sentinel logs [CVE-2023-3299](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3299) [[GH-17907](https://github.com/hashicorp/nomad/issues/17907)] - -IMPROVEMENTS: - -* cli: Add `-quiet` flag to `nomad var init` command [[GH-17526](https://github.com/hashicorp/nomad/issues/17526)] -* cni: Ensure to setup CNI addresses in deterministic order [[GH-17766](https://github.com/hashicorp/nomad/issues/17766)] -* deps: Updated Vault SDK to 0.9.0 [[GH-17281](https://github.com/hashicorp/nomad/issues/17281)] -* deps: update docker to 23.0.3 [[GH-16862](https://github.com/hashicorp/nomad/issues/16862)] - -BUG FIXES: - -* api: Fixed a bug that caused a panic when calling the `Jobs().Plan()` function with a job missing an ID [[GH-17689](https://github.com/hashicorp/nomad/issues/17689)] -* api: add missing constant for unknown allocation status [[GH-17726](https://github.com/hashicorp/nomad/issues/17726)] -* api: add missing field NetworkStatus for Allocation [[GH-17280](https://github.com/hashicorp/nomad/issues/17280)] -* cgroups: Fixed a bug removing all DevicesSets when alloc is created/removed [[GH-17535](https://github.com/hashicorp/nomad/issues/17535)] -* cli: Output error messages during deployment monitoring [[GH-17348](https://github.com/hashicorp/nomad/issues/17348)] -* client: Fixed a bug where Nomad incorrectly wrote to memory swappiness cgroup on old kernels [[GH-17625](https://github.com/hashicorp/nomad/issues/17625)] -* client: fixed a bug that prevented Nomad from fingerprinting Consul 1.13.8 correctly [[GH-17349](https://github.com/hashicorp/nomad/issues/17349)] -* consul: Fixed a bug where Nomad would repeatedly try to revoke successfully revoked SI tokens [[GH-17847](https://github.com/hashicorp/nomad/issues/17847)] -* core: Fix panic around client deregistration and pending heartbeats [[GH-17316](https://github.com/hashicorp/nomad/issues/17316)] -* core: fixed a bug that caused job validation to fail when a task with `kill_timeout` was placed inside a group with `update.progress_deadline` set to 0 [[GH-17342](https://github.com/hashicorp/nomad/issues/17342)] -* csi: Fixed a bug where CSI volumes would fail to restore during client restarts [[GH-17840](https://github.com/hashicorp/nomad/issues/17840)] -* drivers/docker: Fixed a bug where long-running docker operations would incorrectly timeout [[GH-17731](https://github.com/hashicorp/nomad/issues/17731)] -* identity: Fixed a bug where workload identities for periodic and dispatch jobs would not have access to their parent job's ACL policy [[GH-17018](https://github.com/hashicorp/nomad/issues/17018)] -* replication: Fix a potential panic when a non-authoritative region is upgraded and a server with the new version becomes the leader. 
[[GH-17476](https://github.com/hashicorp/nomad/issues/17476)] -* scheduler: Fixed a bug that could cause replacements for failed allocations to be placed in the wrong datacenter during a canary deployment [[GH-17653](https://github.com/hashicorp/nomad/issues/17653)] -* scheduler: Fixed a panic when a node has only one configured dynamic port [[GH-17619](https://github.com/hashicorp/nomad/issues/17619)] -* ui: don't show a service as healthy when its parent allocation stops running [[GH-17465](https://github.com/hashicorp/nomad/issues/17465)] - -## 1.4.10 (May 19, 2023) - -IMPROVEMENTS: - -* core: Prevent `task.kill_timeout` being greater than `update.progress_deadline` [[GH-16761](https://github.com/hashicorp/nomad/issues/16761)] - -BUG FIXES: - -* bug: Corrected status description and modification time for canceled evaluations [[GH-17071](https://github.com/hashicorp/nomad/issues/17071)] -* client: Fixed a bug where restarting a terminal allocation turns it into a zombie where allocation and task hooks will run unexpectedly [[GH-17175](https://github.com/hashicorp/nomad/issues/17175)] -* client: clean up resources upon failure to restore task during client restart [[GH-17104](https://github.com/hashicorp/nomad/issues/17104)] -* scale: Fixed a bug where evals could be created with the wrong type [[GH-17092](https://github.com/hashicorp/nomad/issues/17092)] -* scheduler: Fixed a bug where implicit `spread` targets were treated as separate targets for scoring [[GH-17195](https://github.com/hashicorp/nomad/issues/17195)] -* scheduler: Fixed a bug where scores for spread scheduling could be -Inf [[GH-17198](https://github.com/hashicorp/nomad/issues/17198)] - -## 1.4.9 (May 02, 2023) - -IMPROVEMENTS: - -* build: Update from Go 1.20.3 to Go 1.20.4 [[GH-17056](https://github.com/hashicorp/nomad/issues/17056)] -* dependency: update runc to 1.1.5 [[GH-16712](https://github.com/hashicorp/nomad/issues/16712)] - -BUG FIXES: - -* api: Fixed filtering on maps with missing keys [[GH-16991](https://github.com/hashicorp/nomad/issues/16991)] -* build: Linux packages now have vendor label and set the default label to HashiCorp.
This fix is implemented for any future releases, but will not be updated for historical releases [[GH-16071](https://github.com/hashicorp/nomad/issues/16071)] -* client: Fix CNI plugin version fingerprint when output includes protocol version [[GH-16776](https://github.com/hashicorp/nomad/issues/16776)] -* client: Fix address for ports in IPv6 networks [[GH-16723](https://github.com/hashicorp/nomad/issues/16723)] -* client: Fixed a bug where restarting proxy sidecar tasks failed [[GH-16815](https://github.com/hashicorp/nomad/issues/16815)] -* client: Prevent a panic when an allocation has a legacy task-level bridge network and uses a driver that does not create a network namespace [[GH-16921](https://github.com/hashicorp/nomad/issues/16921)] -* core: the deployment's list endpoint now supports look up by prefix using the wildcard for namespace [[GH-16792](https://github.com/hashicorp/nomad/issues/16792)] -* csi: gracefully recover tasks that use csi node plugins [[GH-16809](https://github.com/hashicorp/nomad/issues/16809)] -* docker: Fixed a bug where plugin config values were ignored [[GH-16713](https://github.com/hashicorp/nomad/issues/16713)] -* drain: Fixed a bug where drains would complete based on the server status and not the client status of an allocation [[GH-14348](https://github.com/hashicorp/nomad/issues/14348)] -* driver/exec: Fixed a bug where `cap_drop` and `cap_add` would not expand capabilities [[GH-16643](https://github.com/hashicorp/nomad/issues/16643)] -* scale: Do not allow scale requests for jobs of type system [[GH-16969](https://github.com/hashicorp/nomad/issues/16969)] -* scheduler: Fix reconciliation of reconnecting allocs when the replacement allocations are not running [[GH-16609](https://github.com/hashicorp/nomad/issues/16609)] -* scheduler: honor false value for distinct_hosts constraint [[GH-16907](https://github.com/hashicorp/nomad/issues/16907)] -* server: Added verification of cron jobs already running before forcing new evals right after leader change [[GH-16583](https://github.com/hashicorp/nomad/issues/16583)] -* services: Fixed a bug preventing group service deregistrations after alloc restarts [[GH-16905](https://github.com/hashicorp/nomad/issues/16905)] - -## 1.4.8 (April 04, 2023) - -SECURITY: - -* build: update to Go 1.20.3 to prevent denial of service attack via malicious HTTP headers [CVE-2023-24534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-24534) [[GH-16788](https://github.com/hashicorp/nomad/issues/16788)] - -## 1.4.7 (March 21, 2023) - -IMPROVEMENTS: - -* build: Update to go1.20.2 [[GH-16427](https://github.com/hashicorp/nomad/issues/16427)] - -BUG FIXES: - -* client: Fixed a bug where clients using Consul discovery to join the cluster would get permission denied errors [[GH-16490](https://github.com/hashicorp/nomad/issues/16490)] -* client: Fixed a bug where cpuset initialization fails after Client restart [[GH-16467](https://github.com/hashicorp/nomad/issues/16467)] -* plugin: Add missing fields to `TaskConfig` so they can be accessed by external task drivers [[GH-16434](https://github.com/hashicorp/nomad/issues/16434)] -* services: Fixed a bug where a service would be deregistered twice [[GH-16289](https://github.com/hashicorp/nomad/issues/16289)] - -## 1.4.6 (March 10, 2023) - -SECURITY: - -* variables: Fixed a bug where a workload-associated policy with a deny capability was ignored for the workload's own variables [CVE-2023-1296](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1296) 
[[GH-16349](https://github.com/hashicorp/nomad/issues/16349)] - -IMPROVEMENTS: - -* env/ec2: update cpu metadata [[GH-16417](https://github.com/hashicorp/nomad/issues/16417)] - -BUG FIXES: - -* client: Fixed a bug that prevented allocations with interpolated values in Consul services from being marked as healthy [[GH-16402](https://github.com/hashicorp/nomad/issues/16402)] -* client: Fixed a bug where clients used the serf advertise address to connect to servers when using Consul auto-discovery [[GH-16217](https://github.com/hashicorp/nomad/issues/16217)] -* docker: Fixed a bug where pause containers would be erroneously removed [[GH-16352](https://github.com/hashicorp/nomad/issues/16352)] -* scheduler: Fixed a bug where collisions in dynamic port offerings would result in spurious plan-for-node-rejected errors [[GH-16401](https://github.com/hashicorp/nomad/issues/16401)] -* server: Fixed a bug where deregistering a job that was already garbage collected would create a new evaluation [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] -* server: Fixed a bug where node updates that produced errors from service discovery or CSI plugin updates were not logged [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] -* server: Fixed a bug where the `system reconcile summaries` command and API would not return any scheduler-related errors [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] - -## 1.4.5 (March 01, 2023) - -BREAKING CHANGES: - -* core: Ensure no leakage of evaluations for batch jobs. Prior to this change allocations and evaluations for batch jobs were never garbage collected until the batch job was explicitly stopped. The new `batch_eval_gc_threshold` server configuration controls how often they are collected. The default threshold is `24h`. [[GH-15097](https://github.com/hashicorp/nomad/issues/15097)] - -IMPROVEMENTS: - -* api: improved error returned from AllocFS.Logs when response is not JSON [[GH-15558](https://github.com/hashicorp/nomad/issues/15558)] -* cli: Added `-wait` flag to `deployment status` for use with `-monitor` mode [[GH-15262](https://github.com/hashicorp/nomad/issues/15262)] -* cli: Added tls command to enable creating Certificate Authority and Self signed TLS certificates. - There are two sub commands `tls ca` and `tls cert` that are helpers when creating certificates. 
[[GH-14296](https://github.com/hashicorp/nomad/issues/14296)] -* client: detect and cleanup leaked iptables rules [[GH-15407](https://github.com/hashicorp/nomad/issues/15407)] -* consul: add client configuration for grpc_ca_file [[GH-15701](https://github.com/hashicorp/nomad/issues/15701)] -* deps: Update google.golang.org/grpc to v1.51.0 [[GH-15402](https://github.com/hashicorp/nomad/issues/15402)] -* docs: link to an envoy troubleshooting doc when envoy bootstrap fails [[GH-15908](https://github.com/hashicorp/nomad/issues/15908)] -* env/ec2: update cpu metadata [[GH-15770](https://github.com/hashicorp/nomad/issues/15770)] -* fingerprint: Detect CNI plugins and set versions as node attributes [[GH-15452](https://github.com/hashicorp/nomad/issues/15452)] -* scheduler: allow using device IDs in `affinity` and `constraint` [[GH-15455](https://github.com/hashicorp/nomad/issues/15455)] -* ui: Add a button for expanding the Task sidebar to full width [[GH-15735](https://github.com/hashicorp/nomad/issues/15735)] -* ui: Made task rows in Allocation tables look more aligned with their parent [[GH-15363](https://github.com/hashicorp/nomad/issues/15363)] -* ui: Show events alongside logs in the Task sidebar [[GH-15733](https://github.com/hashicorp/nomad/issues/15733)] -* ui: The web UI will now show canary_tags of services anyplace we would normally show tags. [[GH-15458](https://github.com/hashicorp/nomad/issues/15458)] - -DEPRECATIONS: - -* api: The connect `ConsulExposeConfig.Path` field is deprecated in favor of `ConsulExposeConfig.Paths` [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] -* api: The connect `ConsulProxy.ExposeConfig` field is deprecated in favor of `ConsulProxy.Expose` [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] - -BUG FIXES: - -* acl: Fixed a bug in token creation which failed to parse expiration TTLs correctly [[GH-15999](https://github.com/hashicorp/nomad/issues/15999)] -* acl: Fixed a bug where creating/updating a policy which was invalid would return a 404 status code, not a 400 [[GH-16000](https://github.com/hashicorp/nomad/issues/16000)] -* agent: Make agent syslog log level follow log_level config [[GH-15625](https://github.com/hashicorp/nomad/issues/15625)] -* api: Added missing node states to NodeStatus constants [[GH-16166](https://github.com/hashicorp/nomad/issues/16166)] -* api: Fix stale querystring parameter value as boolean [[GH-15605](https://github.com/hashicorp/nomad/issues/15605)] -* api: Fixed a bug where exposeConfig field was not provided correctly when getting the jobs via the API [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] -* api: Fixed a nil pointer dereference when periodic jobs are missing their periodic spec [[GH-13845](https://github.com/hashicorp/nomad/issues/13845)] -* cgutil: handle panic coming from runc helper method [[GH-16180](https://github.com/hashicorp/nomad/issues/16180)] -* check: Add support for sending custom host header [[GH-15337](https://github.com/hashicorp/nomad/issues/15337)] -* cli: Fixed a bug where `nomad fmt -check` would overwrite the file being checked [[GH-16174](https://github.com/hashicorp/nomad/issues/16174)] -* cli: Fixed a panic in `deployment status` when rollback deployments are slow to appear [[GH-16011](https://github.com/hashicorp/nomad/issues/16011)] -* cli: corrected typos in ACL role create/delete CLI commands [[GH-15382](https://github.com/hashicorp/nomad/issues/15382)] -* cli: fix nomad fmt -check flag not returning error code 
[[GH-15797](https://github.com/hashicorp/nomad/issues/15797)] -* client: Fixed a bug where allocation cleanup hooks would not run [[GH-15477](https://github.com/hashicorp/nomad/issues/15477)] -* connect: ingress http/2/grpc listeners may exclude hosts [[GH-15749](https://github.com/hashicorp/nomad/issues/15749)] -* consul: Fixed a bug where acceptable service identity on Consul token was not accepted [[GH-15928](https://github.com/hashicorp/nomad/issues/15928)] -* consul: Fixed a bug where consul token was not respected when reverting a job [[GH-15996](https://github.com/hashicorp/nomad/issues/15996)] -* consul: Fixed a bug where services would continuously re-register when using ipv6 [[GH-15411](https://github.com/hashicorp/nomad/issues/15411)] -* consul: correctly interpret missing consul checks as unhealthy [[GH-15822](https://github.com/hashicorp/nomad/issues/15822)] -* core: enforce strict ordering that node status updates are recorded after allocation updates for reconnecting clients [[GH-15808](https://github.com/hashicorp/nomad/issues/15808)] -* csi: Fixed a bug where a crashing plugin could panic the Nomad client [[GH-15518](https://github.com/hashicorp/nomad/issues/15518)] -* csi: Fixed a bug where secrets that include '=' were incorrectly rejected [[GH-15670](https://github.com/hashicorp/nomad/issues/15670)] -* csi: Fixed a bug where volumes in non-default namespaces could not be scheduled for system or sysbatch jobs [[GH-15372](https://github.com/hashicorp/nomad/issues/15372)] -* csi: Fixed potential state store corruption when garbage collecting CSI volume claims or checking whether it's safe to force-deregister a volume [[GH-16256](https://github.com/hashicorp/nomad/issues/16256)] -* docker: Fixed a bug where images referenced by multiple tags would not be GC'd [[GH-15962](https://github.com/hashicorp/nomad/issues/15962)] -* docker: Fixed a bug where infra_image did not get alloc_id label [[GH-15898](https://github.com/hashicorp/nomad/issues/15898)] -* docker: configure restart policy for bridge network pause container [[GH-15732](https://github.com/hashicorp/nomad/issues/15732)] -* eval broker: Fixed a bug where the cancelable eval reaper used an incorrect lock when getting the set of cancelable evals from the broker [[GH-16112](https://github.com/hashicorp/nomad/issues/16112)] -* event stream: Fixed a bug where undefined ACL policies on the request's ACL would result in incorrect authentication errors [[GH-15495](https://github.com/hashicorp/nomad/issues/15495)] -* fix: Add the missing option propagation_mode for volume_mount [[GH-15626](https://github.com/hashicorp/nomad/issues/15626)] -* parser: Fixed a panic in the job spec parser when a variable validation block was missing its condition [[GH-16018](https://github.com/hashicorp/nomad/issues/16018)] -* scheduler (Enterprise): Fixed a bug that prevented new allocations from multiregion jobs to be placed in situations where other regions are not involved, such as node updates. 
[[GH-15325](https://github.com/hashicorp/nomad/issues/15325)] -* services: Fixed a bug where check_restart on nomad services on tasks failed with incorrect CheckIDs [[GH-16240](https://github.com/hashicorp/nomad/issues/16240)] -* template: Fixed a bug that caused the chage script to fail to run [[GH-15915](https://github.com/hashicorp/nomad/issues/15915)] -* template: Fixed a bug where the template runner's Nomad token would be erased by in-place updates to a task [[GH-16266](https://github.com/hashicorp/nomad/issues/16266)] -* ui: Fix allocation memory chart to display the same value as the CLI [[GH-15909](https://github.com/hashicorp/nomad/issues/15909)] -* ui: Fix navigation to pages for jobs that are not in the default namespace [[GH-15906](https://github.com/hashicorp/nomad/issues/15906)] -* ui: Fixed a bug where the exec window would not maintain namespace upon refresh [[GH-15454](https://github.com/hashicorp/nomad/issues/15454)] -* ui: Scale down logger height in the UI when the sidebar container also has task events [[GH-15759](https://github.com/hashicorp/nomad/issues/15759)] -* volumes: Fixed a bug where `per_alloc` was allowed for volume blocks on system and sysbatch jobs, which do not have an allocation index [[GH-16030](https://github.com/hashicorp/nomad/issues/16030)] - -## 1.4.4 (February 14, 2023) - -SECURITY: - -* artifact: Provide mitigations against unbounded artifact decompression [[GH-16126](https://github.com/hashicorp/nomad/issues/16126)] -* build: Update to go1.20.1 [[GH-16182](https://github.com/hashicorp/nomad/issues/16182)] - -## 1.4.3 (November 21, 2022) - -IMPROVEMENTS: - -* api: Added an API for counting evaluations that match a filter [[GH-15147](https://github.com/hashicorp/nomad/issues/15147)] -* cli: Improved performance of eval delete with large filter sets [[GH-15117](https://github.com/hashicorp/nomad/issues/15117)] -* consul: add trace logging around service registrations [[GH-6115](https://github.com/hashicorp/nomad/issues/6115)] -* deps: Updated github.com/aws/aws-sdk-go from 1.44.84 to 1.44.126 [[GH-15081](https://github.com/hashicorp/nomad/issues/15081)] -* deps: Updated github.com/docker/cli from 20.10.18+incompatible to 20.10.21+incompatible [[GH-15078](https://github.com/hashicorp/nomad/issues/15078)] -* exec: Allow running commands from mounted host volumes [[GH-14851](https://github.com/hashicorp/nomad/issues/14851)] -* scheduler: when multiple evaluations are pending for the same job, evaluate the latest and cancel the intermediaries on success [[GH-14621](https://github.com/hashicorp/nomad/issues/14621)] -* server: Add a git `revision` tag to the serf tags gossiped between servers. [[GH-9159](https://github.com/hashicorp/nomad/issues/9159)] -* template: Expose per-template configuration for `error_on_missing_key`. This allows jobspec authors to specify that a -template should fail if it references a struct or map key that does not exist. The default value is false and should be -fully backward compatible. 
[[GH-14002](https://github.com/hashicorp/nomad/issues/14002)] -* ui: Adds a "Pack" tag and logo on the jobs list index when appropriate [[GH-14833](https://github.com/hashicorp/nomad/issues/14833)] -* ui: add consul connect service upstream and on-update info to the service sidebar [[GH-15324](https://github.com/hashicorp/nomad/issues/15324)] -* ui: allow users to upload files by click or drag in the web ui [[GH-14747](https://github.com/hashicorp/nomad/issues/14747)] - -BUG FIXES: - -* api: Ensure all request body decode errors return a 400 status code [[GH-15252](https://github.com/hashicorp/nomad/issues/15252)] -* autopilot: Fixed a bug where autopilot would try to fetch raft stats from other regions [[GH-15290](https://github.com/hashicorp/nomad/issues/15290)] -* cleanup: fixed missing timer.Reset for plan queue stat emitter [[GH-15134](https://github.com/hashicorp/nomad/issues/15134)] -* client: Fixed a bug where tasks would restart without waiting for interval [[GH-15215](https://github.com/hashicorp/nomad/issues/15215)] -* client: fixed a bug where non-`docker` tasks with network isolation would leak network namespaces and iptables rules if the client was restarted while they were running [[GH-15214](https://github.com/hashicorp/nomad/issues/15214)] -* client: prevent allocations from failing on client reconnect by retrying RPC requests when no servers are available yet [[GH-15140](https://github.com/hashicorp/nomad/issues/15140)] -* csi: Fixed race condition that can cause a panic when volume is garbage collected [[GH-15101](https://github.com/hashicorp/nomad/issues/15101)] -* device: Fixed a bug where device plugins would not fingerprint on startup [[GH-15125](https://github.com/hashicorp/nomad/issues/15125)] -* drivers: Fixed a bug where one goroutine was leaked per task [[GH-15180](https://github.com/hashicorp/nomad/issues/15180)] -* drivers: pass missing `propagation_mode` configuration for volume mounts to external plugins [[GH-15096](https://github.com/hashicorp/nomad/issues/15096)] -* event_stream: fixed a bug where dynamic port values would fail to serialize in the event stream [[GH-12916](https://github.com/hashicorp/nomad/issues/12916)] -* fingerprint: Ensure Nomad can correctly fingerprint Consul gRPC where the Consul agent is running v1.14.0 or greater [[GH-15309](https://github.com/hashicorp/nomad/issues/15309)] -* keyring: Fixed a bug where a missing key would prevent any further replication. [[GH-15092](https://github.com/hashicorp/nomad/issues/15092)] -* keyring: Fixed a bug where replication would stop after snapshot restores [[GH-15227](https://github.com/hashicorp/nomad/issues/15227)] -* keyring: Re-enabled keyring garbage collection after fixing a bug where keys would be garbage collected even if they were used to sign a live allocation's workload identity. [[GH-15092](https://github.com/hashicorp/nomad/issues/15092)] -* scheduler: Fixed a bug that prevented disconnected allocations to be updated after they reconnect. [[GH-15068](https://github.com/hashicorp/nomad/issues/15068)] -* scheduler: Prevent unnecessary placements when disconnected allocations reconnect. 
[[GH-15068](https://github.com/hashicorp/nomad/issues/15068)] -* template: Fixed a bug where template could cause agent panic on startup [[GH-15192](https://github.com/hashicorp/nomad/issues/15192)] -* ui: Fixed a bug where the task log sidebar would close and re-open if the parent job state changed [[GH-15146](https://github.com/hashicorp/nomad/issues/15146)] -* variables: Fixed a bug where a long-running rekey could hit the nack timeout [[GH-15102](https://github.com/hashicorp/nomad/issues/15102)] -* wi: Fixed a bug where clients running pre-1.4.0 allocations would erase the token used to query service registrations after upgrade [[GH-15121](https://github.com/hashicorp/nomad/issues/15121)] - -## 1.4.2 (October 26, 2022) - -SECURITY: - -* event stream: Fixed a bug where ACL token expiration was not checked when emitting events [[GH-15013](https://github.com/hashicorp/nomad/issues/15013)] -* variables: Fixed a bug where non-sensitive variable metadata (paths and raft indexes) was exposed via the template `nomadVarList` function to other jobs in the same namespace. [[GH-15012](https://github.com/hashicorp/nomad/issues/15012)] - -IMPROVEMENTS: - -* cli: Added `-id-prefix-template` option to `nomad job dispatch` [[GH-14631](https://github.com/hashicorp/nomad/issues/14631)] -* cli: add nomad fmt to the CLI [[GH-14779](https://github.com/hashicorp/nomad/issues/14779)] -* deps: update go-memdb for goroutine leak fix [[GH-14983](https://github.com/hashicorp/nomad/issues/14983)] -* docker: improve memory usage for docker_logger [[GH-14875](https://github.com/hashicorp/nomad/issues/14875)] -* event stream: Added ACL role topic with create and delete types [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] -* scheduler: Allow jobs not requiring network resources even when no network is fingerprinted [[GH-14300](https://github.com/hashicorp/nomad/issues/14300)] -* ui: adds searching and filtering to the topology page [[GH-14913](https://github.com/hashicorp/nomad/issues/14913)] - -BUG FIXES: - -* acl: Callers should be able to read policies linked via roles to the token used [[GH-14982](https://github.com/hashicorp/nomad/issues/14982)] -* acl: Ensure all federated servers meet v.1.4.0 minimum before ACL roles can be written [[GH-14908](https://github.com/hashicorp/nomad/issues/14908)] -* acl: Fixed a bug where Nomad version checking for one-time tokens was enforced across regions [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] -* cli: prevent a panic when the Nomad API returns an error while collecting a debug bundle [[GH-14992](https://github.com/hashicorp/nomad/issues/14992)] -* client: Check ACL token expiry when resolving token within ACL cache [[GH-14922](https://github.com/hashicorp/nomad/issues/14922)] -* client: Fixed a bug where Nomad could not detect cores on recent RHEL systems [[GH-15027](https://github.com/hashicorp/nomad/issues/15027)] -* client: Fixed a bug where network fingerprinters were not reloaded when the client configuration was reloaded with SIGHUP [[GH-14615](https://github.com/hashicorp/nomad/issues/14615)] -* client: Resolve ACL roles within client ACL cache [[GH-14922](https://github.com/hashicorp/nomad/issues/14922)] -* consul: Fixed a bug where services continuously re-registered [[GH-14917](https://github.com/hashicorp/nomad/issues/14917)] -* consul: atomically register checks on initial service registration [[GH-14944](https://github.com/hashicorp/nomad/issues/14944)] -* deps: Update hashicorp/consul-template to 
90370e07bf621811826b803fb633dadbfb4cf287; fixes template rerendering issues when only user or group set [[GH-15045](https://github.com/hashicorp/nomad/issues/15045)] -* deps: Update hashicorp/raft to v1.3.11; fixes unstable leadership on server removal [[GH-15021](https://github.com/hashicorp/nomad/issues/15021)] -* event stream: Check ACL token expiry when resolving tokens [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] -* event stream: Resolve ACL roles within ACL tokens [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] -* keyring: Fixed a bug where `nomad system gc` forced a root keyring rotation. [[GH-15009](https://github.com/hashicorp/nomad/issues/15009)] -* keyring: Fixed a bug where if a key is rotated immediately following a leader election, plans that are in-flight may get signed before the new leader has the key. Allow for a short timeout-and-retry to avoid rejecting plans. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] -* keyring: Fixed a bug where keyring initialization is blocked by un-upgraded federated regions [[GH-14901](https://github.com/hashicorp/nomad/issues/14901)] -* keyring: Fixed a bug where root keyring garbage collection configuration values were not respected. [[GH-15009](https://github.com/hashicorp/nomad/issues/15009)] -* keyring: Fixed a bug where root keyring initialization could occur before the raft FSM on the leader was verified to be up-to-date. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] -* keyring: Fixed a bug where root keyring replication could make incorrectly stale queries and exit early if those queries did not return the expected key. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] -* keyring: Fixed a bug where the root keyring replicator's rate limiting would be skipped if the keyring replication exceeded the burst rate. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] -* keyring: Removed root key garbage collection to avoid orphaned workload identities [[GH-15034](https://github.com/hashicorp/nomad/issues/15034)] -* nomad native service discovery: Ensure all local servers meet v.1.3.0 minimum before service registrations can be written [[GH-14924](https://github.com/hashicorp/nomad/issues/14924)] -* scheduler: Fixed a bug where version checking for disconnected clients handling was enforced across regions [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] -* servicedisco: Fixed a bug where job using checks could land on incompatible client [[GH-14868](https://github.com/hashicorp/nomad/issues/14868)] -* services: Fixed a regression where check task validation stopped allowing some configurations [[GH-14864](https://github.com/hashicorp/nomad/issues/14864)] -* ui: Fixed line charts to update x-axis (time) where relevant [[GH-14814](https://github.com/hashicorp/nomad/issues/14814)] -* ui: Fixes an issue where service tags would bleed past the edge of the screen [[GH-14832](https://github.com/hashicorp/nomad/issues/14832)] -* variables: Fixed a bug where Nomad version checking was not enforced for writing to variables [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] -* variables: Fixed a bug where getting empty results from listing variables resulted in a permission denied error. 
[[GH-15012](https://github.com/hashicorp/nomad/issues/15012)] - -## 1.4.1 (October 06, 2022) - -BUG FIXES: - -* keyring: Fixed a panic that can occur during upgrades to 1.4.0 when initializing the keyring [[GH-14821](https://github.com/hashicorp/nomad/issues/14821)] - -## 1.4.0 (October 04, 2022) - -FEATURES: - -* **ACL Roles:** Added support for ACL Roles. [[GH-14320](https://github.com/hashicorp/nomad/issues/14320)] -* **Nomad Native Service Discovery**: Add built-in support for checks on Nomad services [[GH-13715](https://github.com/hashicorp/nomad/issues/13715)] -* **Variables:** Added support for storing encrypted configuration values. [[GH-13000](https://github.com/hashicorp/nomad/issues/13000)] -* **UI Services table:** Display task-level services in addition to group-level services. [[GH-14199](https://github.com/hashicorp/nomad/issues/14199)] - -BREAKING CHANGES: - -* audit (Enterprise): fixed inconsistency in event filter logic [[GH-14212](https://github.com/hashicorp/nomad/issues/14212)] -* cli: `eval status -json` no longer supports listing all evals in JSON. Use `eval list -json`. [[GH-14651](https://github.com/hashicorp/nomad/issues/14651)] -* core: remove support for raft protocol version 2 [[GH-13467](https://github.com/hashicorp/nomad/issues/13467)] - -SECURITY: - -* client: recover from panics caused by artifact download to prevent the Nomad client from crashing [[GH-14696](https://github.com/hashicorp/nomad/issues/14696)] - -IMPROVEMENTS: - -* acl: ACL tokens can now be created with an expiration TTL. [[GH-14320](https://github.com/hashicorp/nomad/issues/14320)] -* api: return a more descriptive error when /v1/acl/bootstrap fails to decode request body [[GH-14629](https://github.com/hashicorp/nomad/issues/14629)] -* autopilot: upgrade to raft-autopilot library [[GH-14441](https://github.com/hashicorp/nomad/issues/14441)] -* cli: Removed deprecated network quota fields from `quota status` output [[GH-14468](https://github.com/hashicorp/nomad/issues/14468)] -* cli: `acl policy info` output format has changed to improve readability with large policy documents [[GH-14140](https://github.com/hashicorp/nomad/issues/14140)] -* cli: `operator debug` now writes newline-delimited JSON files for large collections [[GH-14610](https://github.com/hashicorp/nomad/issues/14610)] -* cli: ignore `-hcl2-strict` when -hcl1 is set. 
[[GH-14426](https://github.com/hashicorp/nomad/issues/14426)] -* cli: warn destructive update only when count is greater than 1 [[GH-13103](https://github.com/hashicorp/nomad/issues/13103)] -* client: Add built-in support for checks on nomad services [[GH-13715](https://github.com/hashicorp/nomad/issues/13715)] -* client: re-enable nss-based user lookups [[GH-14742](https://github.com/hashicorp/nomad/issues/14742)] -* connect: add namespace, job, and group to Envoy stats [[GH-14311](https://github.com/hashicorp/nomad/issues/14311)] -* connect: add nomad environment variables to envoy bootstrap [[GH-12959](https://github.com/hashicorp/nomad/issues/12959)] -* consul: Allow interpolation of task environment values into Consul Service Mesh configuration [[GH-14445](https://github.com/hashicorp/nomad/issues/14445)] -* consul: Enable setting custom tagged_addresses field [[GH-12951](https://github.com/hashicorp/nomad/issues/12951)] -* core: constraint operands are now compared numerically if operands are numbers [[GH-14722](https://github.com/hashicorp/nomad/issues/14722)] -* deps: Update fsouza/go-dockerclient to v1.8.2 [[GH-14112](https://github.com/hashicorp/nomad/issues/14112)] -* deps: Update go.etcd.io/bbolt to v1.3.6 [[GH-14025](https://github.com/hashicorp/nomad/issues/14025)] -* deps: Update google.golang.org/grpc to v1.48.0 [[GH-14103](https://github.com/hashicorp/nomad/issues/14103)] -* deps: Update gopsutil for improvements in fingerprinting on non-Linux platforms [[GH-14209](https://github.com/hashicorp/nomad/issues/14209)] -* deps: Updated `github.com/armon/go-metrics` to `v0.4.1` which includes a performance improvement for Prometheus sink [[GH-14493](https://github.com/hashicorp/nomad/issues/14493)] -* deps: Updated `github.com/hashicorp/go-version` to `v1.6.0` [[GH-14364](https://github.com/hashicorp/nomad/issues/14364)] -* deps: remove unused darwin C library [[GH-13894](https://github.com/hashicorp/nomad/issues/13894)] -* fingerprint: Add node attribute for number of reservable cores: `cpu.num_reservable_cores` [[GH-14694](https://github.com/hashicorp/nomad/issues/14694)] -* fingerprint: Consul and Vault attributes are no longer cleared on fingerprinting failure [[GH-14673](https://github.com/hashicorp/nomad/issues/14673)] -* jobspec: Added `strlen` HCL2 function to determine the length of a string [[GH-14463](https://github.com/hashicorp/nomad/issues/14463)] -* server: Log when a node's eligibility changes [[GH-14125](https://github.com/hashicorp/nomad/issues/14125)] -* ui: Display different message when trying to exec into a job with no task running. [[GH-14071](https://github.com/hashicorp/nomad/issues/14071)] -* ui: add service discovery, along with health checks, to job and allocation routes [[GH-14408](https://github.com/hashicorp/nomad/issues/14408)] -* ui: adds a sidebar to show in-page logs for a given task, accessible via job, client, or task group routes [[GH-14612](https://github.com/hashicorp/nomad/issues/14612)] -* ui: allow deep-dive clicks to tasks from client, job, and task group routes. [[GH-14592](https://github.com/hashicorp/nomad/issues/14592)] -* ui: attach timestamps and a visual indicator on failure to health checks in the Web UI [[GH-14677](https://github.com/hashicorp/nomad/issues/14677)] - -BUG FIXES: - -* api: Fixed a bug where the List Volume API did not include the `ControllerRequired` and `ResourceExhausted` fields. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] -* cli: Ignore Vault token when generating job diff. 
[[GH-14424](https://github.com/hashicorp/nomad/issues/14424)] -* cli: fixed a bug in the `operator api` command where the HTTPS scheme was not always correctly calculated [[GH-14635](https://github.com/hashicorp/nomad/issues/14635)] -* cli: return exit code `255` when `nomad job plan` fails job validation. [[GH-14426](https://github.com/hashicorp/nomad/issues/14426)] -* cli: set content length on POST requests when using the `nomad operator api` command [[GH-14634](https://github.com/hashicorp/nomad/issues/14634)] -* client: Fixed bug where clients could attempt to connect to servers with invalid addresses retrieved from Consul. [[GH-14431](https://github.com/hashicorp/nomad/issues/14431)] -* core: prevent new allocations from overlapping execution with stopping allocations [[GH-10446](https://github.com/hashicorp/nomad/issues/10446)] -* csi: Fixed a bug where a volume that was successfully unmounted by the client but then failed controller unpublishing would not be marked free until garbage collection ran. [[GH-14675](https://github.com/hashicorp/nomad/issues/14675)] -* csi: Fixed a bug where the server would not send controller unpublish for a failed allocation. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] -* csi: Fixed a data race in the volume unpublish endpoint that could result in claims being incorrectly marked as freed before being persisted to raft. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] -* helpers: Fixed a bug where random stagger func did not protect against negative inputs [[GH-14497](https://github.com/hashicorp/nomad/issues/14497)] -* jobspec: Fixed a bug where an `artifact` with `headers` configuration would fail to parse when using HCLv1 [[GH-14637](https://github.com/hashicorp/nomad/issues/14637)] -* metrics: Update client `node_scheduling_eligibility` value with server heartbeats. [[GH-14483](https://github.com/hashicorp/nomad/issues/14483)] -* quotas (Enterprise): Fixed a server crashing panic when updating and checking a quota concurrently. -* rpc (Enterprise): check for spec changes in all regions when registering multiregion jobs [[GH-14519](https://github.com/hashicorp/nomad/issues/14519)] -* scheduler (Enterprise): Fixed bug where the scheduler would treat multiregion jobs as paused for job types that don't use deployments [[GH-14659](https://github.com/hashicorp/nomad/issues/14659)] -* template: Fixed a bug where the `splay` timeout was not being applied when `change_mode` was set to `script`. [[GH-14749](https://github.com/hashicorp/nomad/issues/14749)] -* ui: Remove extra space when displaying the version in the menu footer. [[GH-14457](https://github.com/hashicorp/nomad/issues/14457)] - ## 1.3.16 (August 18, 2023) BUG FIXES: diff --git a/CHANGELOG.md b/CHANGELOG.md index eca4ba50604d..08c2b2b86301 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -197,102 +197,6 @@ BUG FIXES: * vault: Fixed a bug where poststop tasks would not get a Vault token [[GH-19268](https://github.com/hashicorp/nomad/issues/19268)] * vault: Fixed an issue that could cause Nomad to attempt to renew a Vault token that is already expired [[GH-18985](https://github.com/hashicorp/nomad/issues/18985)] -## 1.6.9 (March 12, 2024) - -SECURITY: - -* build: Update to go1.22 to address Go standard library vulnerabilities CVE-2024-24783, CVE-2023-45290, and CVE-2024-24785. 
[[GH-20066](https://github.com/hashicorp/nomad/issues/20066)] -* deps: Upgrade protobuf library to 1.33.0 to avoid scan alerts for CVE-2024-24786, which Nomad is not vulnerable to [[GH-20100](https://github.com/hashicorp/nomad/issues/20100)] - -BUG FIXES: - -* cli: Fixed a bug where the `nomad job restart` command could crash if the job type was not present in a response from the server [[GH-20049](https://github.com/hashicorp/nomad/issues/20049)] -* client: Fixed a bug where corrupt client state could panic the client [[GH-19972](https://github.com/hashicorp/nomad/issues/19972)] -* cni: Fixed a bug where DNS set by CNI plugins was not provided to task drivers [[GH-20007](https://github.com/hashicorp/nomad/issues/20007)] -* connect: Fixed a bug where `expose` blocks would not appear in `job plan` diff output [[GH-19990](https://github.com/hashicorp/nomad/issues/19990)] - -## 1.6.8 (February 13, 2024) - -SECURITY: - -* windows: Remove `LazyDLL` calls for system modules to harden Nomad against attacks from the host [[GH-19925](https://github.com/hashicorp/nomad/issues/19925)] - -BUG FIXES: - -* cli: Fix return code when `nomad job run` succeeds after a blocked eval [[GH-19876](https://github.com/hashicorp/nomad/issues/19876)] -* cli: Fixed a bug where the `nomad tls ca create` command failed when the `-domain` was used without other values [[GH-19892](https://github.com/hashicorp/nomad/issues/19892)] -* connect: Fixed envoy sidecars being unable to restart after node reboots [[GH-19787](https://github.com/hashicorp/nomad/issues/19787)] -* exec: Fixed a bug in `alloc exec` where closing websocket streams could cause a panic [[GH-19932](https://github.com/hashicorp/nomad/issues/19932)] -* scheduler: Fixed a bug that caused blocked evaluations due to port conflict to not have a reason explaining why the evaluation was blocked [[GH-19933](https://github.com/hashicorp/nomad/issues/19933)] -* ui: Fix an issue where a same-named task from a different group could be selected when the user clicks Exec from a task group page where multiple allocations would be valid [[GH-19878](https://github.com/hashicorp/nomad/issues/19878)] - -## 1.6.7 (February 08, 2024) - -SECURITY: - -* deps: Updated runc to 1.1.12 to address CVE-2024-21626 [[GH-19851](https://github.com/hashicorp/nomad/issues/19851)] -* migration: Fixed a bug where archives used for migration were not checked for symlinks that escaped the allocation directory [[GH-19887](https://github.com/hashicorp/nomad/issues/19887)] -* template: Fixed a bug where symlinks could force templates to read and write to arbitrary locations (CVE-2024-1329) [[GH-19888](https://github.com/hashicorp/nomad/issues/19888)] - -## 1.6.6 (January 15, 2024) - -IMPROVEMENTS: - -* build: update to go 1.21.6 [[GH-19709](https://github.com/hashicorp/nomad/issues/19709)] - -BUG FIXES: - -* acl: Fixed auth method hashing which meant changing some fields would be silently ignored [[GH-19677](https://github.com/hashicorp/nomad/issues/19677)] -* auth: Added new optional OIDCDisableUserInfo setting for OIDC auth provider [[GH-19566](https://github.com/hashicorp/nomad/issues/19566)] -* core: Ensure job HCL submission data is persisted and restored during the FSM snapshot process [[GH-19605](https://github.com/hashicorp/nomad/issues/19605)] -* namespaces: Failed delete calls no longer return success codes [[GH-19483](https://github.com/hashicorp/nomad/issues/19483)] -* server: Fix server not waiting for workers to submit nacks for dequeued evaluations before shutting down 
[[GH-19560](https://github.com/hashicorp/nomad/issues/19560)] -* state: Fixed a bug where purged jobs would not get new deployments [[GH-19609](https://github.com/hashicorp/nomad/issues/19609)] - -## 1.6.5 (December 13, 2023) - -BUG FIXES: - -* cli: Fix a bug in the `var put` command which prevented combining items as CLI arguments and other parameters as flags [[GH-19423](https://github.com/hashicorp/nomad/issues/19423)] -* client: remove incomplete allocation entries from client state database during client restarts [[GH-16638](https://github.com/hashicorp/nomad/issues/16638)] -* connect: Fixed a bug where deployments would not wait for Connect sidecar task health checks to pass [[GH-19334](https://github.com/hashicorp/nomad/issues/19334)] -* consul: uses token namespace to fetch policies for verification [[GH-18516](https://github.com/hashicorp/nomad/issues/18516)] -* csi: Added validation to `csi_plugin` blocks to prevent `stage_publish_base_dir` from being a subdirectory of `mount_dir` [[GH-19441](https://github.com/hashicorp/nomad/issues/19441)] -* metrics: Revert upgrade of `go-metrics` to fix an issue where metrics from dependencies, such as raft, were no longer emitted [[GH-19375](https://github.com/hashicorp/nomad/issues/19375)] - -## 1.6.4 (December 07, 2023) - -BREAKING CHANGES: - -* core: Honor job's namespace when checking `distinct_hosts` feasibility [[GH-19004](https://github.com/hashicorp/nomad/issues/19004)] - -SECURITY: - -* build: Update to go1.21.4 to resolve Windows path validation CVE in Go [[GH-19013](https://github.com/hashicorp/nomad/issues/19013)] -* build: Update to go1.21.5 to resolve Windows path validation CVE in Go [[GH-19320](https://github.com/hashicorp/nomad/issues/19320)] - -IMPROVEMENTS: - -* cli: Add file prediction for operator raft/snapshot commands [[GH-18901](https://github.com/hashicorp/nomad/issues/18901)] -* ui: color-code node and server status cells [[GH-18318](https://github.com/hashicorp/nomad/issues/18318)] -* ui: show plan output warnings alongside placement failures and dry-run info when running a job through the web ui [[GH-19225](https://github.com/hashicorp/nomad/issues/19225)] - -BUG FIXES: - -* agent: Correct websocket status code handling [[GH-19172](https://github.com/hashicorp/nomad/issues/19172)] -* api: Fix panic in `Allocation.Stub` method when `Job` is unset [[GH-19115](https://github.com/hashicorp/nomad/issues/19115)] -* cli: Fixed a bug that caused the `nomad job restart` command to miscount the allocations to restart [[GH-19155](https://github.com/hashicorp/nomad/issues/19155)] -* cli: Fixed a panic when the `nomad job restart` command received an interrupt signal while waiting for an answer [[GH-19154](https://github.com/hashicorp/nomad/issues/19154)] -* cli: Fixed the `nomad job restart` command to create replacements for batch and system jobs and to prevent sysbatch jobs from being rescheduled since they never create replacements [[GH-19147](https://github.com/hashicorp/nomad/issues/19147)] -* client: Fixed a bug where client API calls would fail incorrectly with permission denied errors when using ACL tokens with dangling policies [[GH-18972](https://github.com/hashicorp/nomad/issues/18972)] -* core: Fix incorrect submit time for stopped jobs [[GH-18967](https://github.com/hashicorp/nomad/issues/18967)] -* ui: Fixed an issue where purging a job with a namespace did not process correctly [[GH-19139](https://github.com/hashicorp/nomad/issues/19139)] -* ui: fix an issue where starting a stopped job with default-less 
variables would not retain those variables when done via the job page start button in the web ui [[GH-19220](https://github.com/hashicorp/nomad/issues/19220)] -* ui: fix the job auto-linked variable path name when user lacks variable write permissions [[GH-18598](https://github.com/hashicorp/nomad/issues/18598)] -* variables: Fixed a bug where poststop tasks were not allowed access to Variables [[GH-19270](https://github.com/hashicorp/nomad/issues/19270)] -* vault: Fixed a bug where poststop tasks would not get a Vault token [[GH-19268](https://github.com/hashicorp/nomad/issues/19268)] -* vault: Fixed an issue that could cause Nomad to attempt to renew a Vault token that is already expired [[GH-18985](https://github.com/hashicorp/nomad/issues/18985)] - ## 1.6.3 (October 30, 2023) SECURITY: @@ -479,90 +383,6 @@ BUG FIXES: * ui: fixes an issue where the allocations table on child (periodic, parameterized) job pages wouldn't update when accessed via their parent [[GH-17214](https://github.com/hashicorp/nomad/issues/17214)] * ui: preserve newlines when displaying shown variables in non-json mode [[GH-17343](https://github.com/hashicorp/nomad/issues/17343)] -## 1.5.16 (March 12, 2024) - -SECURITY: - -* build: Update to go1.22 to address Go standard library vulnerabilities CVE-2024-24783, CVE-2023-45290, and CVE-2024-24785. [[GH-20066](https://github.com/hashicorp/nomad/issues/20066)] -* deps: Upgrade protobuf library to 1.33.0 to avoid scan alerts for CVE-2024-24786, which Nomad is not vulnerable to [[GH-20100](https://github.com/hashicorp/nomad/issues/20100)] - -BUG FIXES: - -* cli: Fixed a bug where the `nomad job restart` command could crash if the job type was not present in a response from the server [[GH-20049](https://github.com/hashicorp/nomad/issues/20049)] -* client: Fixed a bug where corrupt client state could panic the client [[GH-19972](https://github.com/hashicorp/nomad/issues/19972)] -* cni: Fixed a bug where DNS set by CNI plugins was not provided to task drivers [[GH-20007](https://github.com/hashicorp/nomad/issues/20007)] -* connect: Fixed a bug where `expose` blocks would not appear in `job plan` diff output [[GH-19990](https://github.com/hashicorp/nomad/issues/19990)] - -## 1.5.15 (February 13, 2024) - -SECURITY: - -* windows: Remove `LazyDLL` calls for system modules to harden Nomad against attacks from the host [[GH-19925](https://github.com/hashicorp/nomad/issues/19925)] - -BUG FIXES: - -* cli: Fix return code when `nomad job run` succeeds after a blocked eval [[GH-19876](https://github.com/hashicorp/nomad/issues/19876)] -* connect: Fixed envoy sidecars being unable to restart after node reboots [[GH-19787](https://github.com/hashicorp/nomad/issues/19787)] -* exec: Fixed a bug in `alloc exec` where closing websocket streams could cause a panic [[GH-19932](https://github.com/hashicorp/nomad/issues/19932)] -* scheduler: Fixed a bug that caused blocked evaluations due to port conflict to not have a reason explaining why the evaluation was blocked [[GH-19933](https://github.com/hashicorp/nomad/issues/19933)] -* ui: Fix an issue where a same-named task from a different group could be selected when the user clicks Exec from a task group page where multiple allocations would be valid [[GH-19878](https://github.com/hashicorp/nomad/issues/19878)] - -## 1.5.14 (February 08, 2024) - -SECURITY: - -* deps: Updated runc to 1.1.12 to address CVE-2024-21626 [[GH-19851](https://github.com/hashicorp/nomad/issues/19851)] -* migration: Fixed a bug where archives used for migration were not 
checked for symlinks that escaped the allocation directory [[GH-19887](https://github.com/hashicorp/nomad/issues/19887)] -* template: Fixed a bug where symlinks could force templates to read and write to arbitrary locations (CVE-2024-1329) [[GH-19888](https://github.com/hashicorp/nomad/issues/19888)] - -## 1.5.13 (January 15, 2024) - -IMPROVEMENTS: - -* build: update to go 1.21.6 [[GH-19709](https://github.com/hashicorp/nomad/issues/19709)] - -BUG FIXES: - -* acl: Fixed auth method hashing which meant changing some fields would be silently ignored [[GH-19677](https://github.com/hashicorp/nomad/issues/19677)] -* auth: Added new optional OIDCDisableUserInfo setting for OIDC auth provider [[GH-19566](https://github.com/hashicorp/nomad/issues/19566)] -* namespaces: Failed delete calls no longer return success codes [[GH-19483](https://github.com/hashicorp/nomad/issues/19483)] -* server: Fix server not waiting for workers to submit nacks for dequeued evaluations before shutting down [[GH-19560](https://github.com/hashicorp/nomad/issues/19560)] -* state: Fixed a bug where purged jobs would not get new deployments [[GH-19609](https://github.com/hashicorp/nomad/issues/19609)] - -## 1.5.12 (December 13, 2023) - -BUG FIXES: - -* cli: Fix a bug in the `var put` command which prevented combining items as CLI arguments and other parameters as flags [[GH-19423](https://github.com/hashicorp/nomad/issues/19423)] -* client: remove incomplete allocation entries from client state database during client restarts [[GH-16638](https://github.com/hashicorp/nomad/issues/16638)] -* connect: Fixed a bug where deployments would not wait for Connect sidecar task health checks to pass [[GH-19334](https://github.com/hashicorp/nomad/issues/19334)] -* consul: uses token namespace to fetch policies for verification [[GH-18516](https://github.com/hashicorp/nomad/issues/18516)] -* csi: Added validation to `csi_plugin` blocks to prevent `stage_publish_base_dir` from being a subdirectory of `mount_dir` [[GH-19441](https://github.com/hashicorp/nomad/issues/19441)] -* metrics: Revert upgrade of `go-metrics` to fix an issue where metrics from dependencies, such as raft, were no longer emitted [[GH-19376](https://github.com/hashicorp/nomad/issues/19376)] - -## 1.5.11 (December 07, 2023) - -BREAKING CHANGES: - -* core: Honor job's namespace when checking `distinct_hosts` feasibility [[GH-19004](https://github.com/hashicorp/nomad/issues/19004)] - -SECURITY: - -* build: Update to go1.21.5 to resolve Windows path validation CVE in Go [[GH-19320](https://github.com/hashicorp/nomad/issues/19320)] - -BUG FIXES: - -* agent: Correct websocket status code handling [[GH-19172](https://github.com/hashicorp/nomad/issues/19172)] -* api: Fix panic in `Allocation.Stub` method when `Job` is unset [[GH-19115](https://github.com/hashicorp/nomad/issues/19115)] -* cli: Fixed a panic when the `nomad job restart` command received an interrupt signal while waiting for an answer [[GH-19154](https://github.com/hashicorp/nomad/issues/19154)] -* cli: Fixed the `nomad job restart` command to create replacements for batch and system jobs and to prevent sysbatch jobs from being rescheduled since they never create replacements [[GH-19147](https://github.com/hashicorp/nomad/issues/19147)] -* client: Fixed a bug where client API calls would fail incorrectly with permission denied errors when using ACL tokens with dangling policies [[GH-18972](https://github.com/hashicorp/nomad/issues/18972)] -* core: Fix incorrect submit time for stopped jobs 
[[GH-18967](https://github.com/hashicorp/nomad/issues/18967)] -* ui: Fixed an issue where purging a job with a namespace did not process correctly [[GH-19139](https://github.com/hashicorp/nomad/issues/19139)] -* variables: Fixed a bug where poststop tasks were not allowed access to Variables [[GH-19270](https://github.com/hashicorp/nomad/issues/19270)] -* vault: Fixed a bug where poststop tasks would not get a Vault token [[GH-19268](https://github.com/hashicorp/nomad/issues/19268)] -* vault: Fixed an issue that could cause Nomad to attempt to renew a Vault token that is already expired [[GH-18985](https://github.com/hashicorp/nomad/issues/18985)] - ## 1.5.10 (October 30, 2023) SECURITY: @@ -948,6 +768,427 @@ BUG FIXES: * ui: Scale down logger height in the UI when the sidebar container also has task events [[GH-15759](https://github.com/hashicorp/nomad/issues/15759)] * volumes: Fixed a bug where `per_alloc` was allowed for volume blocks on system and sysbatch jobs, which do not have an allocation index [[GH-16030](https://github.com/hashicorp/nomad/issues/16030)] +## 1.4.14 (October 30, 2023) + +SECURITY: + +* build: Update to Go 1.21.3 [[GH-18717](https://github.com/hashicorp/nomad/issues/18717)] + +BUG FIXES: + +* build: Add `timetzdata` Go build tag on Windows binaries to embed time zone data so periodic jobs are able to specify a time zone value on Windows environments [[GH-18676](https://github.com/hashicorp/nomad/issues/18676)] +* cli: Fixed an unexpected behavior of the `nomad acl token update` command that could cause a management token to be downgraded to client on update [[GH-18689](https://github.com/hashicorp/nomad/issues/18689)] +* client: prevent tasks from starting without the prestart hooks running [[GH-18662](https://github.com/hashicorp/nomad/issues/18662)] +* csi: check controller plugin health early during volume register/create [[GH-18570](https://github.com/hashicorp/nomad/issues/18570)] +* metrics: Fixed a bug where CPU counters could report errors for negative values [[GH-18835](https://github.com/hashicorp/nomad/issues/18835)] +* scaling: Unblock blocking queries to /v1/job/{job-id}/scale if the job goes away [[GH-18637](https://github.com/hashicorp/nomad/issues/18637)] +* scheduler (Enterprise): auto-unblock evals with associated quotas when node resources are freed up [[GH-18838](https://github.com/hashicorp/nomad/issues/18838)] +* scheduler: Ensure duplicate allocation indexes are tracked and fixed when performing job updates [[GH-18873](https://github.com/hashicorp/nomad/issues/18873)] +* services: use interpolated address when performing nomad service health checks [[GH-18584](https://github.com/hashicorp/nomad/issues/18584)] + +## 1.4.13 (September 13, 2023) + +IMPROVEMENTS: + +* build: Update to Go 1.21.0 [[GH-18184](https://github.com/hashicorp/nomad/issues/18184)] +* raft: remove use of deprecated Leader func [[GH-18352](https://github.com/hashicorp/nomad/issues/18352)] + +BUG FIXES: + +* acl: Fixed a bug where ACL tokens linked to ACL roles containing duplicate policies would cause erroneous permission denied responses [[GH-18419](https://github.com/hashicorp/nomad/issues/18419)] +* cli: Add missing help message for the `-consul-namespace` flag in the `nomad job run` command [[GH-18081](https://github.com/hashicorp/nomad/issues/18081)] +* cli: Fixed a bug that prevented CSI volumes in namespaces other than `default` from being displayed in the `nomad node status -verbose` output [[GH-17925](https://github.com/hashicorp/nomad/issues/17925)] +* cli:
Snapshot name is required in `volume snapshot create` command [[GH-17958](https://github.com/hashicorp/nomad/issues/17958)] +* client: Fixed a bug where the state of poststop tasks could be corrupted by client gc [[GH-17971](https://github.com/hashicorp/nomad/issues/17971)] +* client: Ignore stale server updates to prevent GCing allocations that should be running [[GH-18269](https://github.com/hashicorp/nomad/issues/18269)] +* client: return 404 instead of 500 when trying to access logs and files from allocations that have been garbage collected [[GH-18232](https://github.com/hashicorp/nomad/issues/18232)] +* core: Fixed a bug where exponential backoff could result in excessive CPU usage [[GH-18200](https://github.com/hashicorp/nomad/issues/18200)] +* csi: fixed a bug that could cause a panic when deleting volumes [[GH-18234](https://github.com/hashicorp/nomad/issues/18234)] +* fingerprint: fix 'default' alias not being added to the interface specified by network_interface [[GH-18096](https://github.com/hashicorp/nomad/issues/18096)] +* jobspec: Add diff for Task Group scaling block [[GH-18332](https://github.com/hashicorp/nomad/issues/18332)] +* migration: Fixed a bug where previous alloc logs were destroyed when migrating ephemeral_disk on the same client [[GH-18108](https://github.com/hashicorp/nomad/issues/18108)] +* scheduler: Fixed a bug where device IDs were not correctly filtered in constraints [[GH-18141](https://github.com/hashicorp/nomad/issues/18141)] +* services: Add validation message when `tls_skip_verify` is set to `true` on a Nomad service [[GH-18333](https://github.com/hashicorp/nomad/issues/18333)] + +## 1.4.12 (July 21, 2023) + +BUG FIXES: + +* csi: Fixed a bug in sending concurrent requests to CSI controller plugins by serializing them per plugin [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] +* csi: Fixed a bug where CSI controller requests could be sent to unhealthy plugins [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] +* csi: Fixed a bug where CSI controller requests could not be sent to controllers on nodes ineligible for scheduling [[GH-17996](https://github.com/hashicorp/nomad/issues/17996)] +* services: Fixed a bug that prevented passing query parameters in Nomad native service discovery HTTP health check paths [[GH-17936](https://github.com/hashicorp/nomad/issues/17936)] +* ui: Fixed a bug that prevented nodes from being filtered by the "Ineligible" and "Draining" state filters [[GH-17940](https://github.com/hashicorp/nomad/issues/17940)] +* ui: Fixed error handling for cross-region requests when the receiving region does not implement the endpoint being requested [[GH-18020](https://github.com/hashicorp/nomad/issues/18020)] + +## 1.4.11 (July 18, 2023) + +SECURITY: + +* acl: Fixed a bug where a namespace ACL policy without a label was applied to an unexpected namespace. [CVE-2023-3072](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3072) [[GH-17908](https://github.com/hashicorp/nomad/issues/17908)] +* search: Fixed a bug where ACL did not filter plugin and variable names in the search endpoint.
[CVE-2023-3300](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3300) [[GH-17906](https://github.com/hashicorp/nomad/issues/17906)] +* sentinel (Enterprise): Fixed a bug where ACL tokens could be exfiltrated via Sentinel logs [CVE-2023-3299](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-3299) [[GH-17907](https://github.com/hashicorp/nomad/issues/17907)] + +IMPROVEMENTS: + +* cli: Add `-quiet` flag to `nomad var init` command [[GH-17526](https://github.com/hashicorp/nomad/issues/17526)] +* cni: Ensure CNI addresses are set up in deterministic order [[GH-17766](https://github.com/hashicorp/nomad/issues/17766)] +* deps: Updated Vault SDK to 0.9.0 [[GH-17281](https://github.com/hashicorp/nomad/issues/17281)] +* deps: update docker to 23.0.3 [[GH-16862](https://github.com/hashicorp/nomad/issues/16862)] + +BUG FIXES: + +* api: Fixed a bug that caused a panic when calling the `Jobs().Plan()` function with a job missing an ID [[GH-17689](https://github.com/hashicorp/nomad/issues/17689)] +* api: add missing constant for unknown allocation status [[GH-17726](https://github.com/hashicorp/nomad/issues/17726)] +* api: add missing field NetworkStatus for Allocation [[GH-17280](https://github.com/hashicorp/nomad/issues/17280)] +* cgroups: Fixed a bug removing all DevicesSets when alloc is created/removed [[GH-17535](https://github.com/hashicorp/nomad/issues/17535)] +* cli: Output error messages during deployment monitoring [[GH-17348](https://github.com/hashicorp/nomad/issues/17348)] +* client: Fixed a bug where Nomad incorrectly wrote to memory swappiness cgroup on old kernels [[GH-17625](https://github.com/hashicorp/nomad/issues/17625)] +* client: fixed a bug that prevented Nomad from fingerprinting Consul 1.13.8 correctly [[GH-17349](https://github.com/hashicorp/nomad/issues/17349)] +* consul: Fixed a bug where Nomad would repeatedly try to revoke successfully revoked SI tokens [[GH-17847](https://github.com/hashicorp/nomad/issues/17847)] +* core: Fix panic around client deregistration and pending heartbeats [[GH-17316](https://github.com/hashicorp/nomad/issues/17316)] +* core: fixed a bug that caused job validation to fail when a task with `kill_timeout` was placed inside a group with `update.progress_deadline` set to 0 [[GH-17342](https://github.com/hashicorp/nomad/issues/17342)] +* csi: Fixed a bug where CSI volumes would fail to restore during client restarts [[GH-17840](https://github.com/hashicorp/nomad/issues/17840)] +* drivers/docker: Fixed a bug where long-running docker operations would incorrectly time out [[GH-17731](https://github.com/hashicorp/nomad/issues/17731)] +* identity: Fixed a bug where workload identities for periodic and dispatch jobs would not have access to their parent job's ACL policy [[GH-17018](https://github.com/hashicorp/nomad/issues/17018)] +* replication: Fix a potential panic when a non-authoritative region is upgraded and a server with the new version becomes the leader.
[[GH-17476](https://github.com/hashicorp/nomad/issues/17476)] +* scheduler: Fixed a bug that could cause replacements for failed allocations to be placed in the wrong datacenter during a canary deployment [[GH-17653](https://github.com/hashicorp/nomad/issues/17653)] +* scheduler: Fixed a panic when a node has only one configured dynamic port [[GH-17619](https://github.com/hashicorp/nomad/issues/17619)] +* ui: don't show a service as healthy when its parent allocation stops running [[GH-17465](https://github.com/hashicorp/nomad/issues/17465)] + +## 1.4.10 (May 19, 2023) + +IMPROVEMENTS: + +* core: Prevent `task.kill_timeout` being greater than `update.progress_deadline` [[GH-16761](https://github.com/hashicorp/nomad/issues/16761)] + +BUG FIXES: + +* bug: Corrected status description and modification time for canceled evaluations [[GH-17071](https://github.com/hashicorp/nomad/issues/17071)] +* client: Fixed a bug where restarting a terminal allocation turns it into a zombie where allocation and task hooks will run unexpectedly [[GH-17175](https://github.com/hashicorp/nomad/issues/17175)] +* client: clean up resources upon failure to restore task during client restart [[GH-17104](https://github.com/hashicorp/nomad/issues/17104)] +* scale: Fixed a bug where evals could be created with the wrong type [[GH-17092](https://github.com/hashicorp/nomad/issues/17092)] +* scheduler: Fixed a bug where implicit `spread` targets were treated as separate targets for scoring [[GH-17195](https://github.com/hashicorp/nomad/issues/17195)] +* scheduler: Fixed a bug where scores for spread scheduling could be -Inf [[GH-17198](https://github.com/hashicorp/nomad/issues/17198)] + +## 1.4.9 (May 02, 2023) + +IMPROVEMENTS: + +* build: Update from Go 1.20.3 to Go 1.20.4 [[GH-17056](https://github.com/hashicorp/nomad/issues/17056)] +* dependency: update runc to 1.1.5 [[GH-16712](https://github.com/hashicorp/nomad/issues/16712)] + +BUG FIXES: + +* api: Fixed filtering on maps with missing keys [[GH-16991](https://github.com/hashicorp/nomad/issues/16991)] +* build: Linux packages now have vendor label and set the default label to HashiCorp.
This fix is implemented for any future releases, but will not be updated for historical releases [[GH-16071](https://github.com/hashicorp/nomad/issues/16071)] +* client: Fix CNI plugin version fingerprint when output includes protocol version [[GH-16776](https://github.com/hashicorp/nomad/issues/16776)] +* client: Fix address for ports in IPv6 networks [[GH-16723](https://github.com/hashicorp/nomad/issues/16723)] +* client: Fixed a bug where restarting proxy sidecar tasks failed [[GH-16815](https://github.com/hashicorp/nomad/issues/16815)] +* client: Prevent a panic when an allocation has a legacy task-level bridge network and uses a driver that does not create a network namespace [[GH-16921](https://github.com/hashicorp/nomad/issues/16921)] +* core: the deployment's list endpoint now supports lookup by prefix using the wildcard for namespace [[GH-16792](https://github.com/hashicorp/nomad/issues/16792)] +* csi: gracefully recover tasks that use csi node plugins [[GH-16809](https://github.com/hashicorp/nomad/issues/16809)] +* docker: Fixed a bug where plugin config values were ignored [[GH-16713](https://github.com/hashicorp/nomad/issues/16713)] +* drain: Fixed a bug where drains would complete based on the server status and not the client status of an allocation [[GH-14348](https://github.com/hashicorp/nomad/issues/14348)] +* driver/exec: Fixed a bug where `cap_drop` and `cap_add` would not expand capabilities [[GH-16643](https://github.com/hashicorp/nomad/issues/16643)] +* scale: Do not allow scale requests for jobs of type system [[GH-16969](https://github.com/hashicorp/nomad/issues/16969)] +* scheduler: Fix reconciliation of reconnecting allocs when the replacement allocations are not running [[GH-16609](https://github.com/hashicorp/nomad/issues/16609)] +* scheduler: honor false value for distinct_hosts constraint [[GH-16907](https://github.com/hashicorp/nomad/issues/16907)] +* server: Added verification of cron jobs already running before forcing new evals right after leader change [[GH-16583](https://github.com/hashicorp/nomad/issues/16583)] +* services: Fixed a bug preventing group service deregistrations after alloc restarts [[GH-16905](https://github.com/hashicorp/nomad/issues/16905)] + +## 1.4.8 (April 04, 2023) + +SECURITY: + +* build: update to Go 1.20.3 to prevent denial of service attack via malicious HTTP headers [CVE-2023-24534](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-24534) [[GH-16788](https://github.com/hashicorp/nomad/issues/16788)] + +## 1.4.7 (March 21, 2023) + +IMPROVEMENTS: + +* build: Update to go1.20.2 [[GH-16427](https://github.com/hashicorp/nomad/issues/16427)] + +BUG FIXES: + +* client: Fixed a bug where clients using Consul discovery to join the cluster would get permission denied errors [[GH-16490](https://github.com/hashicorp/nomad/issues/16490)] +* client: Fixed a bug where cpuset initialization fails after Client restart [[GH-16467](https://github.com/hashicorp/nomad/issues/16467)] +* plugin: Add missing fields to `TaskConfig` so they can be accessed by external task drivers [[GH-16434](https://github.com/hashicorp/nomad/issues/16434)] +* services: Fixed a bug where a service would be deregistered twice [[GH-16289](https://github.com/hashicorp/nomad/issues/16289)] + +## 1.4.6 (March 10, 2023) + +SECURITY: + +* variables: Fixed a bug where a workload-associated policy with a deny capability was ignored for the workload's own variables [CVE-2023-1296](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2023-1296)
[[GH-16349](https://github.com/hashicorp/nomad/issues/16349)] + +IMPROVEMENTS: + +* env/ec2: update cpu metadata [[GH-16417](https://github.com/hashicorp/nomad/issues/16417)] + +BUG FIXES: + +* client: Fixed a bug that prevented allocations with interpolated values in Consul services from being marked as healthy [[GH-16402](https://github.com/hashicorp/nomad/issues/16402)] +* client: Fixed a bug where clients used the serf advertise address to connect to servers when using Consul auto-discovery [[GH-16217](https://github.com/hashicorp/nomad/issues/16217)] +* docker: Fixed a bug where pause containers would be erroneously removed [[GH-16352](https://github.com/hashicorp/nomad/issues/16352)] +* scheduler: Fixed a bug where collisions in dynamic port offerings would result in spurious plan-for-node-rejected errors [[GH-16401](https://github.com/hashicorp/nomad/issues/16401)] +* server: Fixed a bug where deregistering a job that was already garbage collected would create a new evaluation [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] +* server: Fixed a bug where node updates that produced errors from service discovery or CSI plugin updates were not logged [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] +* server: Fixed a bug where the `system reconcile summaries` command and API would not return any scheduler-related errors [[GH-16287](https://github.com/hashicorp/nomad/issues/16287)] + +## 1.4.5 (March 01, 2023) + +BREAKING CHANGES: + +* core: Ensure no leakage of evaluations for batch jobs. Prior to this change, allocations and evaluations for batch jobs were never garbage collected until the batch job was explicitly stopped. The new `batch_eval_gc_threshold` server configuration controls how often they are collected. The default threshold is `24h`. [[GH-15097](https://github.com/hashicorp/nomad/issues/15097)] + +IMPROVEMENTS: + +* api: improved error returned from AllocFS.Logs when response is not JSON [[GH-15558](https://github.com/hashicorp/nomad/issues/15558)] +* cli: Added `-wait` flag to `deployment status` for use with `-monitor` mode [[GH-15262](https://github.com/hashicorp/nomad/issues/15262)] +* cli: Added `tls` command to enable creating Certificate Authority and self-signed TLS certificates. + There are two subcommands, `tls ca` and `tls cert`, that are helpers when creating certificates.
[[GH-14296](https://github.com/hashicorp/nomad/issues/14296)] +* client: detect and clean up leaked iptables rules [[GH-15407](https://github.com/hashicorp/nomad/issues/15407)] +* consul: add client configuration for grpc_ca_file [[GH-15701](https://github.com/hashicorp/nomad/issues/15701)] +* deps: Update google.golang.org/grpc to v1.51.0 [[GH-15402](https://github.com/hashicorp/nomad/issues/15402)] +* docs: link to an envoy troubleshooting doc when envoy bootstrap fails [[GH-15908](https://github.com/hashicorp/nomad/issues/15908)] +* env/ec2: update cpu metadata [[GH-15770](https://github.com/hashicorp/nomad/issues/15770)] +* fingerprint: Detect CNI plugins and set versions as node attributes [[GH-15452](https://github.com/hashicorp/nomad/issues/15452)] +* scheduler: allow using device IDs in `affinity` and `constraint` [[GH-15455](https://github.com/hashicorp/nomad/issues/15455)] +* ui: Add a button for expanding the Task sidebar to full width [[GH-15735](https://github.com/hashicorp/nomad/issues/15735)] +* ui: Made task rows in Allocation tables look more aligned with their parent [[GH-15363](https://github.com/hashicorp/nomad/issues/15363)] +* ui: Show events alongside logs in the Task sidebar [[GH-15733](https://github.com/hashicorp/nomad/issues/15733)] +* ui: The web UI will now show canary_tags of services anywhere we would normally show tags. [[GH-15458](https://github.com/hashicorp/nomad/issues/15458)] + +DEPRECATIONS: + +* api: The connect `ConsulExposeConfig.Path` field is deprecated in favor of `ConsulExposeConfig.Paths` [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] +* api: The connect `ConsulProxy.ExposeConfig` field is deprecated in favor of `ConsulProxy.Expose` [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] + +BUG FIXES: + +* acl: Fixed a bug in token creation which failed to parse expiration TTLs correctly [[GH-15999](https://github.com/hashicorp/nomad/issues/15999)] +* acl: Fixed a bug where creating/updating a policy which was invalid would return a 404 status code, not a 400 [[GH-16000](https://github.com/hashicorp/nomad/issues/16000)] +* agent: Make agent syslog log level follow log_level config [[GH-15625](https://github.com/hashicorp/nomad/issues/15625)] +* api: Added missing node states to NodeStatus constants [[GH-16166](https://github.com/hashicorp/nomad/issues/16166)] +* api: Fix stale querystring parameter value as boolean [[GH-15605](https://github.com/hashicorp/nomad/issues/15605)] +* api: Fixed a bug where exposeConfig field was not provided correctly when getting the jobs via the API [[GH-15541](https://github.com/hashicorp/nomad/issues/15541)] +* api: Fixed a nil pointer dereference when periodic jobs are missing their periodic spec [[GH-13845](https://github.com/hashicorp/nomad/issues/13845)] +* cgutil: handle panic coming from runc helper method [[GH-16180](https://github.com/hashicorp/nomad/issues/16180)] +* check: Add support for sending custom host header [[GH-15337](https://github.com/hashicorp/nomad/issues/15337)] +* cli: Fixed a bug where `nomad fmt -check` would overwrite the file being checked [[GH-16174](https://github.com/hashicorp/nomad/issues/16174)] +* cli: Fixed a panic in `deployment status` when rollback deployments are slow to appear [[GH-16011](https://github.com/hashicorp/nomad/issues/16011)] +* cli: corrected typos in ACL role create/delete CLI commands [[GH-15382](https://github.com/hashicorp/nomad/issues/15382)] +* cli: fix nomad fmt -check flag not returning error code
[[GH-15797](https://github.com/hashicorp/nomad/issues/15797)] +* client: Fixed a bug where allocation cleanup hooks would not run [[GH-15477](https://github.com/hashicorp/nomad/issues/15477)] +* connect: ingress http/2/grpc listeners may exclude hosts [[GH-15749](https://github.com/hashicorp/nomad/issues/15749)] +* consul: Fixed a bug where an acceptable service identity on a Consul token was not accepted [[GH-15928](https://github.com/hashicorp/nomad/issues/15928)] +* consul: Fixed a bug where the Consul token was not respected when reverting a job [[GH-15996](https://github.com/hashicorp/nomad/issues/15996)] +* consul: Fixed a bug where services would continuously re-register when using ipv6 [[GH-15411](https://github.com/hashicorp/nomad/issues/15411)] +* consul: correctly interpret missing consul checks as unhealthy [[GH-15822](https://github.com/hashicorp/nomad/issues/15822)] +* core: enforce strict ordering so that node status updates are recorded after allocation updates for reconnecting clients [[GH-15808](https://github.com/hashicorp/nomad/issues/15808)] +* csi: Fixed a bug where a crashing plugin could panic the Nomad client [[GH-15518](https://github.com/hashicorp/nomad/issues/15518)] +* csi: Fixed a bug where secrets that include '=' were incorrectly rejected [[GH-15670](https://github.com/hashicorp/nomad/issues/15670)] +* csi: Fixed a bug where volumes in non-default namespaces could not be scheduled for system or sysbatch jobs [[GH-15372](https://github.com/hashicorp/nomad/issues/15372)] +* csi: Fixed potential state store corruption when garbage collecting CSI volume claims or checking whether it's safe to force-deregister a volume [[GH-16256](https://github.com/hashicorp/nomad/issues/16256)] +* docker: Fixed a bug where images referenced by multiple tags would not be GC'd [[GH-15962](https://github.com/hashicorp/nomad/issues/15962)] +* docker: Fixed a bug where infra_image did not get the alloc_id label [[GH-15898](https://github.com/hashicorp/nomad/issues/15898)] +* docker: configure restart policy for the bridge network pause container [[GH-15732](https://github.com/hashicorp/nomad/issues/15732)] +* eval broker: Fixed a bug where the cancelable eval reaper used an incorrect lock when getting the set of cancelable evals from the broker [[GH-16112](https://github.com/hashicorp/nomad/issues/16112)] +* event stream: Fixed a bug where undefined ACL policies on the request's ACL would result in incorrect authentication errors [[GH-15495](https://github.com/hashicorp/nomad/issues/15495)] +* fix: Added the missing `propagation_mode` option for `volume_mount` (see the example below) [[GH-15626](https://github.com/hashicorp/nomad/issues/15626)] +* parser: Fixed a panic in the job spec parser when a variable validation block was missing its condition [[GH-16018](https://github.com/hashicorp/nomad/issues/16018)] +* scheduler (Enterprise): Fixed a bug that prevented new allocations from multiregion jobs from being placed in situations where other regions are not involved, such as node updates. [[GH-15325](https://github.com/hashicorp/nomad/issues/15325)]
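For reference, `propagation_mode` is set on a task's `volume_mount` block; a minimal sketch (the volume name and paths are hypothetical):

```hcl
task "app" {
  volume_mount {
    volume      = "data"
    destination = "/var/lib/app"
    read_only   = false
    # "private" is the default; "host-to-task" and "bidirectional"
    # control how mount events propagate between host and task.
    propagation_mode = "private"
  }
}
```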
* services: Fixed a bug where `check_restart` for Nomad services on tasks failed with incorrect CheckIDs [[GH-16240](https://github.com/hashicorp/nomad/issues/16240)] +* template: Fixed a bug that caused the change script to fail to run [[GH-15915](https://github.com/hashicorp/nomad/issues/15915)] +* template: Fixed a bug where the template runner's Nomad token would be erased by in-place updates to a task [[GH-16266](https://github.com/hashicorp/nomad/issues/16266)] +* ui: Fix the allocation memory chart to display the same value as the CLI [[GH-15909](https://github.com/hashicorp/nomad/issues/15909)] +* ui: Fix navigation to pages for jobs that are not in the default namespace [[GH-15906](https://github.com/hashicorp/nomad/issues/15906)] +* ui: Fixed a bug where the exec window would not maintain its namespace upon refresh [[GH-15454](https://github.com/hashicorp/nomad/issues/15454)] +* ui: Scale down logger height in the UI when the sidebar container also has task events [[GH-15759](https://github.com/hashicorp/nomad/issues/15759)] +* volumes: Fixed a bug where `per_alloc` was allowed for volume blocks on system and sysbatch jobs, which do not have an allocation index [[GH-16030](https://github.com/hashicorp/nomad/issues/16030)] + +## 1.4.4 (February 14, 2023) + +SECURITY: + +* artifact: Provide mitigations against unbounded artifact decompression [[GH-16126](https://github.com/hashicorp/nomad/issues/16126)] +* build: Update to go1.20.1 [[GH-16182](https://github.com/hashicorp/nomad/issues/16182)] + +## 1.4.3 (November 21, 2022) + +IMPROVEMENTS: + +* api: Added an API for counting evaluations that match a filter [[GH-15147](https://github.com/hashicorp/nomad/issues/15147)] +* cli: Improved performance of eval delete with large filter sets [[GH-15117](https://github.com/hashicorp/nomad/issues/15117)] +* consul: add trace logging around service registrations [[GH-6115](https://github.com/hashicorp/nomad/issues/6115)] +* deps: Updated github.com/aws/aws-sdk-go from 1.44.84 to 1.44.126 [[GH-15081](https://github.com/hashicorp/nomad/issues/15081)] +* deps: Updated github.com/docker/cli from 20.10.18+incompatible to 20.10.21+incompatible [[GH-15078](https://github.com/hashicorp/nomad/issues/15078)] +* exec: Allow running commands from mounted host volumes [[GH-14851](https://github.com/hashicorp/nomad/issues/14851)] +* scheduler: when multiple evaluations are pending for the same job, evaluate the latest and cancel the intermediaries on success [[GH-14621](https://github.com/hashicorp/nomad/issues/14621)] +* server: Add a git `revision` tag to the serf tags gossiped between servers. [[GH-9159](https://github.com/hashicorp/nomad/issues/9159)] +* template: Expose per-template configuration for `error_on_missing_key`. This allows jobspec authors to specify that a template should fail if it references a struct or map key that does not exist. The default value is false and should be fully backward compatible. [[GH-14002](https://github.com/hashicorp/nomad/issues/14002)]
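For example, a template block that opts into this behavior might look like the following (a minimal sketch; the variable path and key name are hypothetical):

```hcl
template {
  destination = "local/app.env"
  # Fail rendering (and the task) if the template references a struct
  # or map key that does not exist, instead of rendering an empty value.
  error_on_missing_key = true
  data                 = <<-EOT
    API_KEY={{ with nomadVar "nomad/jobs/example" }}{{ .api_key }}{{ end }}
  EOT
}
```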
* ui: Adds a "Pack" tag and logo on the jobs list index when appropriate [[GH-14833](https://github.com/hashicorp/nomad/issues/14833)] +* ui: add consul connect service upstream and on-update info to the service sidebar [[GH-15324](https://github.com/hashicorp/nomad/issues/15324)] +* ui: allow users to upload files by click or drag in the web ui [[GH-14747](https://github.com/hashicorp/nomad/issues/14747)] + +BUG FIXES: + +* api: Ensure all request body decode errors return a 400 status code [[GH-15252](https://github.com/hashicorp/nomad/issues/15252)] +* autopilot: Fixed a bug where autopilot would try to fetch raft stats from other regions [[GH-15290](https://github.com/hashicorp/nomad/issues/15290)] +* cleanup: fixed missing timer.Reset for plan queue stat emitter [[GH-15134](https://github.com/hashicorp/nomad/issues/15134)] +* client: Fixed a bug where tasks would restart without waiting for the restart interval [[GH-15215](https://github.com/hashicorp/nomad/issues/15215)] +* client: fixed a bug where non-`docker` tasks with network isolation would leak network namespaces and iptables rules if the client was restarted while they were running [[GH-15214](https://github.com/hashicorp/nomad/issues/15214)] +* client: prevent allocations from failing on client reconnect by retrying RPC requests when no servers are available yet [[GH-15140](https://github.com/hashicorp/nomad/issues/15140)] +* csi: Fixed a race condition that could cause a panic when a volume was garbage collected [[GH-15101](https://github.com/hashicorp/nomad/issues/15101)] +* device: Fixed a bug where device plugins would not fingerprint on startup [[GH-15125](https://github.com/hashicorp/nomad/issues/15125)] +* drivers: Fixed a bug where one goroutine was leaked per task [[GH-15180](https://github.com/hashicorp/nomad/issues/15180)] +* drivers: pass missing `propagation_mode` configuration for volume mounts to external plugins [[GH-15096](https://github.com/hashicorp/nomad/issues/15096)] +* event_stream: fixed a bug where dynamic port values would fail to serialize in the event stream [[GH-12916](https://github.com/hashicorp/nomad/issues/12916)] +* fingerprint: Ensure Nomad can correctly fingerprint Consul gRPC where the Consul agent is running v1.14.0 or greater [[GH-15309](https://github.com/hashicorp/nomad/issues/15309)] +* keyring: Fixed a bug where a missing key would prevent any further replication. [[GH-15092](https://github.com/hashicorp/nomad/issues/15092)] +* keyring: Fixed a bug where replication would stop after snapshot restores [[GH-15227](https://github.com/hashicorp/nomad/issues/15227)] +* keyring: Re-enabled keyring garbage collection after fixing a bug where keys would be garbage collected even if they were used to sign a live allocation's workload identity. [[GH-15092](https://github.com/hashicorp/nomad/issues/15092)] +* scheduler: Fixed a bug that prevented disconnected allocations from being updated after they reconnect. [[GH-15068](https://github.com/hashicorp/nomad/issues/15068)] +* scheduler: Prevent unnecessary placements when disconnected allocations reconnect (see the example below). [[GH-15068](https://github.com/hashicorp/nomad/issues/15068)]
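These reconnect fixes apply to task groups that opt into disconnect handling via `max_client_disconnect`; a minimal sketch of such a group (job layout and names are hypothetical):

```hcl
group "web" {
  # Keep this group's allocations in the "unknown" state for up to one
  # hour after their client disconnects; if the client reconnects within
  # that window, the scheduler reconciles them with any replacements.
  max_client_disconnect = "1h"

  task "server" {
    driver = "docker"

    config {
      image = "nginx:1.23"
    }
  }
}
```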
* template: Fixed a bug where a template could cause an agent panic on startup [[GH-15192](https://github.com/hashicorp/nomad/issues/15192)] +* ui: Fixed a bug where the task log sidebar would close and re-open if the parent job state changed [[GH-15146](https://github.com/hashicorp/nomad/issues/15146)] +* variables: Fixed a bug where a long-running rekey could hit the nack timeout [[GH-15102](https://github.com/hashicorp/nomad/issues/15102)] +* wi: Fixed a bug where clients running pre-1.4.0 allocations would erase the token used to query service registrations after upgrade [[GH-15121](https://github.com/hashicorp/nomad/issues/15121)] + +## 1.4.2 (October 26, 2022) + +SECURITY: + +* event stream: Fixed a bug where ACL token expiration was not checked when emitting events [[GH-15013](https://github.com/hashicorp/nomad/issues/15013)] +* variables: Fixed a bug where non-sensitive variable metadata (paths and raft indexes) was exposed via the template `nomadVarList` function to other jobs in the same namespace. [[GH-15012](https://github.com/hashicorp/nomad/issues/15012)] + +IMPROVEMENTS: + +* cli: Added `-id-prefix-template` option to `nomad job dispatch` (see the example at the end of this section) [[GH-14631](https://github.com/hashicorp/nomad/issues/14631)] +* cli: add `nomad fmt` to the CLI [[GH-14779](https://github.com/hashicorp/nomad/issues/14779)] +* deps: update go-memdb for goroutine leak fix [[GH-14983](https://github.com/hashicorp/nomad/issues/14983)] +* docker: improve memory usage for docker_logger [[GH-14875](https://github.com/hashicorp/nomad/issues/14875)] +* event stream: Added ACL role topic with create and delete types [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] +* scheduler: Allow jobs not requiring network resources even when no network is fingerprinted [[GH-14300](https://github.com/hashicorp/nomad/issues/14300)] +* ui: adds searching and filtering to the topology page [[GH-14913](https://github.com/hashicorp/nomad/issues/14913)] + +BUG FIXES: + +* acl: Callers should be able to read policies linked via roles to the token used [[GH-14982](https://github.com/hashicorp/nomad/issues/14982)] +* acl: Ensure all federated servers meet the v1.4.0 minimum before ACL roles can be written [[GH-14908](https://github.com/hashicorp/nomad/issues/14908)] +* acl: Fixed a bug where Nomad version checking for one-time tokens was enforced across regions [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] +* cli: prevent a panic when the Nomad API returns an error while collecting a debug bundle [[GH-14992](https://github.com/hashicorp/nomad/issues/14992)] +* client: Check ACL token expiry when resolving token within ACL cache [[GH-14922](https://github.com/hashicorp/nomad/issues/14922)] +* client: Fixed a bug where Nomad could not detect cores on recent RHEL systems [[GH-15027](https://github.com/hashicorp/nomad/issues/15027)] +* client: Fixed a bug where network fingerprinters were not reloaded when the client configuration was reloaded with SIGHUP [[GH-14615](https://github.com/hashicorp/nomad/issues/14615)] +* client: Resolve ACL roles within client ACL cache [[GH-14922](https://github.com/hashicorp/nomad/issues/14922)] +* consul: Fixed a bug where services continuously re-registered [[GH-14917](https://github.com/hashicorp/nomad/issues/14917)] +* consul: atomically register checks on initial service registration [[GH-14944](https://github.com/hashicorp/nomad/issues/14944)] +* deps: Update hashicorp/consul-template to
90370e07bf621811826b803fb633dadbfb4cf287; fixes template rerendering issues when only user or group is set [[GH-15045](https://github.com/hashicorp/nomad/issues/15045)] +* deps: Update hashicorp/raft to v1.3.11; fixes unstable leadership on server removal [[GH-15021](https://github.com/hashicorp/nomad/issues/15021)] +* event stream: Check ACL token expiry when resolving tokens [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] +* event stream: Resolve ACL roles within ACL tokens [[GH-14923](https://github.com/hashicorp/nomad/issues/14923)] +* keyring: Fixed a bug where `nomad system gc` forced a root keyring rotation. [[GH-15009](https://github.com/hashicorp/nomad/issues/15009)] +* keyring: Fixed a bug where if a key is rotated immediately following a leader election, plans that are in-flight may get signed before the new leader has the key. Allow for a short timeout-and-retry to avoid rejecting plans. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] +* keyring: Fixed a bug where keyring initialization was blocked by un-upgraded federated regions [[GH-14901](https://github.com/hashicorp/nomad/issues/14901)] +* keyring: Fixed a bug where root keyring garbage collection configuration values were not respected. [[GH-15009](https://github.com/hashicorp/nomad/issues/15009)] +* keyring: Fixed a bug where root keyring initialization could occur before the raft FSM on the leader was verified to be up-to-date. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] +* keyring: Fixed a bug where root keyring replication could make incorrectly stale queries and exit early if those queries did not return the expected key. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] +* keyring: Fixed a bug where the root keyring replicator's rate limiting would be skipped if the keyring replication exceeded the burst rate. [[GH-14987](https://github.com/hashicorp/nomad/issues/14987)] +* keyring: Removed root key garbage collection to avoid orphaned workload identities [[GH-15034](https://github.com/hashicorp/nomad/issues/15034)] +* nomad native service discovery: Ensure all local servers meet the v1.3.0 minimum before service registrations can be written [[GH-14924](https://github.com/hashicorp/nomad/issues/14924)] +* scheduler: Fixed a bug where version checking for disconnected client handling was enforced across regions [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] +* servicedisco: Fixed a bug where a job using checks could land on an incompatible client [[GH-14868](https://github.com/hashicorp/nomad/issues/14868)] +* services: Fixed a regression where check task validation stopped allowing some configurations [[GH-14864](https://github.com/hashicorp/nomad/issues/14864)] +* ui: Fixed line charts to update x-axis (time) where relevant [[GH-14814](https://github.com/hashicorp/nomad/issues/14814)] +* ui: Fixes an issue where service tags would bleed past the edge of the screen [[GH-14832](https://github.com/hashicorp/nomad/issues/14832)] +* variables: Fixed a bug where Nomad version checking was not enforced for writing to variables [[GH-14912](https://github.com/hashicorp/nomad/issues/14912)] +* variables: Fixed a bug where getting empty results from listing variables resulted in a permission denied error. [[GH-15012](https://github.com/hashicorp/nomad/issues/15012)]
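As a usage sketch of the `-id-prefix-template` flag added in this release (the job name and metadata are hypothetical; see the flag's own documentation for the exact templating syntax it supports):

```shell
# Dispatch an instance of a parameterized batch job, giving the generated
# dispatch ID a recognizable prefix rather than only the random suffix.
nomad job dispatch -id-prefix-template "tenant-acme" \
  -meta tenant=acme billing-report
```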
## 1.4.1 (October 06, 2022) + +BUG FIXES: + +* keyring: Fixed a panic that could occur during upgrades to 1.4.0 when initializing the keyring [[GH-14821](https://github.com/hashicorp/nomad/issues/14821)] + +## 1.4.0 (October 04, 2022) + +FEATURES: + +* **ACL Roles:** Added support for ACL Roles. [[GH-14320](https://github.com/hashicorp/nomad/issues/14320)] +* **Nomad Native Service Discovery**: Add built-in support for checks on Nomad services [[GH-13715](https://github.com/hashicorp/nomad/issues/13715)] +* **Variables:** Added support for storing encrypted configuration values (see the example below). [[GH-13000](https://github.com/hashicorp/nomad/issues/13000)] +* **UI Services table:** Display task-level services in addition to group-level services. [[GH-14199](https://github.com/hashicorp/nomad/issues/14199)] + +BREAKING CHANGES: + +* audit (Enterprise): fixed inconsistency in event filter logic [[GH-14212](https://github.com/hashicorp/nomad/issues/14212)] +* cli: `eval status -json` no longer supports listing all evals in JSON. Use `eval list -json`. [[GH-14651](https://github.com/hashicorp/nomad/issues/14651)] +* core: remove support for raft protocol version 2 [[GH-13467](https://github.com/hashicorp/nomad/issues/13467)] + +SECURITY: + +* client: recover from panics caused by artifact download to prevent the Nomad client from crashing [[GH-14696](https://github.com/hashicorp/nomad/issues/14696)] + +IMPROVEMENTS: + +* acl: ACL tokens can now be created with an expiration TTL. [[GH-14320](https://github.com/hashicorp/nomad/issues/14320)] +* api: return a more descriptive error when /v1/acl/bootstrap fails to decode the request body [[GH-14629](https://github.com/hashicorp/nomad/issues/14629)] +* autopilot: upgrade to raft-autopilot library [[GH-14441](https://github.com/hashicorp/nomad/issues/14441)] +* cli: Removed deprecated network quota fields from `quota status` output [[GH-14468](https://github.com/hashicorp/nomad/issues/14468)] +* cli: `acl policy info` output format has changed to improve readability with large policy documents [[GH-14140](https://github.com/hashicorp/nomad/issues/14140)] +* cli: `operator debug` now writes newline-delimited JSON files for large collections [[GH-14610](https://github.com/hashicorp/nomad/issues/14610)] +* cli: ignore `-hcl2-strict` when `-hcl1` is set. [[GH-14426](https://github.com/hashicorp/nomad/issues/14426)]
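A minimal sketch of the Variables workflow from the CLI (the path and key are hypothetical; jobs can then read variables under their own `nomad/jobs/...` path from templates):

```shell
# Store an encrypted key/value pair at a path the "example" job can read.
nomad var put nomad/jobs/example api_key=s3cr3t

# Read it back.
nomad var get nomad/jobs/example
```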
* cli: warn of a destructive update only when the count is greater than 1 [[GH-13103](https://github.com/hashicorp/nomad/issues/13103)] +* client: Add built-in support for checks on nomad services [[GH-13715](https://github.com/hashicorp/nomad/issues/13715)] +* client: re-enable nss-based user lookups [[GH-14742](https://github.com/hashicorp/nomad/issues/14742)] +* connect: add namespace, job, and group to Envoy stats [[GH-14311](https://github.com/hashicorp/nomad/issues/14311)] +* connect: add nomad environment variables to envoy bootstrap [[GH-12959](https://github.com/hashicorp/nomad/issues/12959)] +* consul: Allow interpolation of task environment values into Consul Service Mesh configuration [[GH-14445](https://github.com/hashicorp/nomad/issues/14445)] +* consul: Enable setting custom tagged_addresses field [[GH-12951](https://github.com/hashicorp/nomad/issues/12951)] +* core: constraint operands are now compared numerically if operands are numbers [[GH-14722](https://github.com/hashicorp/nomad/issues/14722)] +* deps: Update fsouza/go-dockerclient to v1.8.2 [[GH-14112](https://github.com/hashicorp/nomad/issues/14112)] +* deps: Update go.etcd.io/bbolt to v1.3.6 [[GH-14025](https://github.com/hashicorp/nomad/issues/14025)] +* deps: Update google.golang.org/grpc to v1.48.0 [[GH-14103](https://github.com/hashicorp/nomad/issues/14103)] +* deps: Update gopsutil for improvements in fingerprinting on non-Linux platforms [[GH-14209](https://github.com/hashicorp/nomad/issues/14209)] +* deps: Updated `github.com/armon/go-metrics` to `v0.4.1` which includes a performance improvement for the Prometheus sink [[GH-14493](https://github.com/hashicorp/nomad/issues/14493)] +* deps: Updated `github.com/hashicorp/go-version` to `v1.6.0` [[GH-14364](https://github.com/hashicorp/nomad/issues/14364)] +* deps: remove unused darwin C library [[GH-13894](https://github.com/hashicorp/nomad/issues/13894)] +* fingerprint: Add node attribute for number of reservable cores: `cpu.num_reservable_cores` [[GH-14694](https://github.com/hashicorp/nomad/issues/14694)] +* fingerprint: Consul and Vault attributes are no longer cleared on fingerprinting failure [[GH-14673](https://github.com/hashicorp/nomad/issues/14673)] +* jobspec: Added `strlen` HCL2 function to determine the length of a string (see the example below) [[GH-14463](https://github.com/hashicorp/nomad/issues/14463)] +* server: Log when a node's eligibility changes [[GH-14125](https://github.com/hashicorp/nomad/issues/14125)] +* ui: Display a different message when trying to exec into a job with no task running. [[GH-14071](https://github.com/hashicorp/nomad/issues/14071)] +* ui: add service discovery, along with health checks, to job and allocation routes [[GH-14408](https://github.com/hashicorp/nomad/issues/14408)] +* ui: adds a sidebar to show in-page logs for a given task, accessible via job, client, or task group routes [[GH-14612](https://github.com/hashicorp/nomad/issues/14612)] +* ui: allow deep-dive clicks to tasks from client, job, and task group routes. [[GH-14592](https://github.com/hashicorp/nomad/issues/14592)] +* ui: attach timestamps and a visual indicator on failure to health checks in the Web UI [[GH-14677](https://github.com/hashicorp/nomad/issues/14677)] + +BUG FIXES: + +* api: Fixed a bug where the List Volume API did not include the `ControllerRequired` and `ResourceExhausted` fields. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] +* cli: Ignore Vault token when generating job diff.
[[GH-14424](https://github.com/hashicorp/nomad/issues/14424)] +* cli: fixed a bug in the `operator api` command where the HTTPS scheme was not always correctly calculated [[GH-14635](https://github.com/hashicorp/nomad/issues/14635)] +* cli: return exit code `255` when `nomad job plan` fails job validation. [[GH-14426](https://github.com/hashicorp/nomad/issues/14426)] +* cli: set content length on POST requests when using the `nomad operator api` command [[GH-14634](https://github.com/hashicorp/nomad/issues/14634)] +* client: Fixed a bug where clients could attempt to connect to servers with invalid addresses retrieved from Consul. [[GH-14431](https://github.com/hashicorp/nomad/issues/14431)] +* core: prevent new allocations from overlapping execution with stopping allocations [[GH-10446](https://github.com/hashicorp/nomad/issues/10446)] +* csi: Fixed a bug where a volume that was successfully unmounted by the client but then failed controller unpublishing would not be marked free until garbage collection ran. [[GH-14675](https://github.com/hashicorp/nomad/issues/14675)] +* csi: Fixed a bug where the server would not send controller unpublish for a failed allocation. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] +* csi: Fixed a data race in the volume unpublish endpoint that could result in claims being incorrectly marked as freed before being persisted to raft. [[GH-14484](https://github.com/hashicorp/nomad/issues/14484)] +* helpers: Fixed a bug where the random stagger function did not protect against negative inputs [[GH-14497](https://github.com/hashicorp/nomad/issues/14497)] +* jobspec: Fixed a bug where an `artifact` with `headers` configuration would fail to parse when using HCLv1 [[GH-14637](https://github.com/hashicorp/nomad/issues/14637)] +* metrics: Update client `node_scheduling_eligibility` value with server heartbeats. [[GH-14483](https://github.com/hashicorp/nomad/issues/14483)] +* quotas (Enterprise): Fixed a server-crashing panic when updating and checking a quota concurrently. +* rpc (Enterprise): check for spec changes in all regions when registering multiregion jobs [[GH-14519](https://github.com/hashicorp/nomad/issues/14519)] +* scheduler (Enterprise): Fixed a bug where the scheduler would treat multiregion jobs as paused for job types that don't use deployments [[GH-14659](https://github.com/hashicorp/nomad/issues/14659)] +* template: Fixed a bug where the `splay` timeout was not being applied when `change_mode` was set to `script`. [[GH-14749](https://github.com/hashicorp/nomad/issues/14749)] +* ui: Remove extra space when displaying the version in the menu footer. [[GH-14457](https://github.com/hashicorp/nomad/issues/14457)] + + ## Unsupported Versions -Versions of Nomad before 1.5.0 are no longer supported. See [CHANGELOG-unsupported.md](./CHANGELOG-unsupported.md) for their changelogs. +Versions of Nomad before 1.4.0 are no longer supported. See [CHANGELOG-unsupported.md](./CHANGELOG-unsupported.md) for their changelogs.
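To illustrate the `strlen` HCL2 function noted under 1.4.0 above (a minimal sketch; the variable name and length limit are hypothetical):

```hcl
variable "region_name" {
  type = string

  validation {
    # strlen returns the number of characters in its argument.
    condition     = strlen(var.region_name) <= 12
    error_message = "region_name must be at most 12 characters long."
  }
}
```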
diff --git a/GNUmakefile b/GNUmakefile index 2052157862e8..e8a3da33b878 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -135,7 +135,7 @@ deps: ## Install build and development dependencies go install gotest.tools/gotestsum@v1.10.0 go install github.com/hashicorp/hcl/v2/cmd/hclfmt@d0c4fa8b0bbc2e4eeccd1ed2a32c2089ed8c5cf1 go install github.com/golang/protobuf/protoc-gen-go@v1.3.4 - go install github.com/hashicorp/go-msgpack/v2/codec/codecgen@v2.1.2 + go install github.com/hashicorp/go-msgpack/codec/codecgen@v1.1.5+base32 go install github.com/bufbuild/buf/cmd/buf@v0.36.0 go install github.com/hashicorp/go-changelog/cmd/changelog-build@latest go install golang.org/x/tools/cmd/stringer@v0.18.0 diff --git a/api/go.mod b/api/go.mod index 017cd4d93a4d..bca3c4536636 100644 --- a/api/go.mod +++ b/api/go.mod @@ -12,7 +12,7 @@ require ( github.com/hashicorp/go-rootcerts v1.0.2 github.com/mitchellh/go-testing-interface v1.14.1 github.com/mitchellh/mapstructure v1.5.0 - github.com/shoenig/test v1.7.1 + github.com/shoenig/test v1.7.0 golang.org/x/exp v0.0.0-20230728194245-b0cb94b80691 ) diff --git a/api/go.sum b/api/go.sum index 92a42b04c90c..e37210d463e0 100644 --- a/api/go.sum +++ b/api/go.sum @@ -27,8 +27,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= -github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= +github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= diff --git a/api/tasks.go b/api/tasks.go index e9a574cee328..0ca26cc645c0 100644 --- a/api/tasks.go +++ b/api/tasks.go @@ -11,8 +11,6 @@ import ( "time" ) -type ReconcileOption = string - const ( // RestartPolicyModeDelay causes an artificial delay till the next interval is // reached when the specified attempts have been reached in the interval. @@ -21,14 +19,6 @@ const ( // RestartPolicyModeFail causes a job to fail if the specified number of // attempts are reached within an interval. RestartPolicyModeFail = "fail" - - // ReconcileOption is used to specify the behavior of the reconciliation process - // between the original allocations and the replacements when a previously - // disconnected client comes back online. - ReconcileOptionKeepOriginal = "keep_original" - ReconcileOptionKeepReplacement = "keep_replacement" - ReconcileOptionBestScore = "best_score" - ReconcileOptionLongestRunning = "longest_running" ) // MemoryStats holds memory usage related stats @@ -123,37 +113,6 @@ func (r *RestartPolicy) Merge(rp *RestartPolicy) { } } -// Disconnect strategy defines how both clients and server should behave in case of -// disconnection between them. -type DisconnectStrategy struct { - // Defines for how long the server will consider the unresponsive node as - // disconnected but alive instead of lost. 
- LostAfter *time.Duration `mapstructure:"lost_after" hcl:"lost_after,optional"` - - // Defines for how long a disconnected client will keep its allocations running. - StopOnClientAfter *time.Duration `mapstructure:"stop_on_client_after" hcl:"stop_on_client_after,optional"` - - // A boolean field used to define if the allocations should be replaced while - // it's considered disconnected. - Replace *bool `mapstructure:"replace" hcl:"replace,optional"` - - // Once the disconnected node starts reporting again, it will define which - // instances to keep: the original allocations, the replacement, the one - // running on the node with the best score as it is currently implemented, - // or the allocation that has been running continuously the longest. - Reconcile *ReconcileOption `mapstructure:"reconcile" hcl:"reconcile,optional"` -} - -func (ds *DisconnectStrategy) Canonicalize() { - if ds.Replace == nil { - ds.Replace = pointerOf(true) - } - - if ds.Reconcile == nil { - ds.Reconcile = pointerOf(ReconcileOptionBestScore) - } -} - // Reschedule configures how Tasks are rescheduled when they crash or fail. type ReschedulePolicy struct { // Attempts limits the number of rescheduling attempts that can occur in an interval. @@ -246,14 +205,6 @@ func (a *Affinity) Canonicalize() { } } -func NewDefaultDisconnectStrategy() *DisconnectStrategy { - return &DisconnectStrategy{ - LostAfter: pointerOf(0 * time.Minute), - Replace: pointerOf(true), - Reconcile: pointerOf(ReconcileOptionBestScore), - } -} - func NewDefaultReschedulePolicy(jobType string) *ReschedulePolicy { var dp *ReschedulePolicy switch jobType { @@ -475,50 +426,40 @@ type VolumeMount struct { Destination *string `hcl:"destination,optional"` ReadOnly *bool `mapstructure:"read_only" hcl:"read_only,optional"` PropagationMode *string `mapstructure:"propagation_mode" hcl:"propagation_mode,optional"` - SELinuxLabel *string `mapstructure:"selinux_label" hcl:"selinux_label,optional"` } func (vm *VolumeMount) Canonicalize() { if vm.PropagationMode == nil { vm.PropagationMode = pointerOf(VolumeMountPropagationPrivate) } - if vm.ReadOnly == nil { vm.ReadOnly = pointerOf(false) } - - if vm.SELinuxLabel == nil { - vm.SELinuxLabel = pointerOf("") - } } // TaskGroup is the unit of scheduling. type TaskGroup struct { - Name *string `hcl:"name,label"` - Count *int `hcl:"count,optional"` - Constraints []*Constraint `hcl:"constraint,block"` - Affinities []*Affinity `hcl:"affinity,block"` - Tasks []*Task `hcl:"task,block"` - Spreads []*Spread `hcl:"spread,block"` - Volumes map[string]*VolumeRequest `hcl:"volume,block"` - RestartPolicy *RestartPolicy `hcl:"restart,block"` - Disconnect *DisconnectStrategy `hcl:"disconnect,block"` - ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"` - EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"` - Update *UpdateStrategy `hcl:"update,block"` - Migrate *MigrateStrategy `hcl:"migrate,block"` - Networks []*NetworkResource `hcl:"network,block"` - Meta map[string]string `hcl:"meta,block"` - Services []*Service `hcl:"service,block"` - ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` - // Deprecated: StopAfterClientDisconnect is deprecated in Nomad 1.8. Use Disconnect.StopOnClientAfter instead. 
- StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"` - // To be deprecated after 1.8.0 infavour of Disconnect.LostAfter - MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"` - Scaling *ScalingPolicy `hcl:"scaling,block"` - Consul *Consul `hcl:"consul,block"` - // To be deprecated after 1.8.0 infavour of Disconnect.Replace - PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"` + Name *string `hcl:"name,label"` + Count *int `hcl:"count,optional"` + Constraints []*Constraint `hcl:"constraint,block"` + Affinities []*Affinity `hcl:"affinity,block"` + Tasks []*Task `hcl:"task,block"` + Spreads []*Spread `hcl:"spread,block"` + Volumes map[string]*VolumeRequest `hcl:"volume,block"` + RestartPolicy *RestartPolicy `hcl:"restart,block"` + ReschedulePolicy *ReschedulePolicy `hcl:"reschedule,block"` + EphemeralDisk *EphemeralDisk `hcl:"ephemeral_disk,block"` + Update *UpdateStrategy `hcl:"update,block"` + Migrate *MigrateStrategy `hcl:"migrate,block"` + Networks []*NetworkResource `hcl:"network,block"` + Meta map[string]string `hcl:"meta,block"` + Services []*Service `hcl:"service,block"` + ShutdownDelay *time.Duration `mapstructure:"shutdown_delay" hcl:"shutdown_delay,optional"` + StopAfterClientDisconnect *time.Duration `mapstructure:"stop_after_client_disconnect" hcl:"stop_after_client_disconnect,optional"` + MaxClientDisconnect *time.Duration `mapstructure:"max_client_disconnect" hcl:"max_client_disconnect,optional"` + Scaling *ScalingPolicy `hcl:"scaling,block"` + Consul *Consul `hcl:"consul,block"` + PreventRescheduleOnLost *bool `hcl:"prevent_reschedule_on_lost,optional"` } // NewTaskGroup creates a new TaskGroup. @@ -590,7 +531,6 @@ func (g *TaskGroup) Canonicalize(job *Job) { if g.ReschedulePolicy != nil { g.ReschedulePolicy.Canonicalize(*job.Type) } - // Merge the migrate strategy from the job if jm, tm := job.Migrate != nil, g.Migrate != nil; jm && tm { jobMigrate := job.Migrate.Copy() @@ -638,14 +578,9 @@ func (g *TaskGroup) Canonicalize(job *Job) { for _, s := range g.Services { s.Canonicalize(nil, g, job) } - if g.PreventRescheduleOnLost == nil { g.PreventRescheduleOnLost = pointerOf(false) } - - if g.Disconnect != nil { - g.Disconnect.Canonicalize() - } } // These needs to be in sync with DefaultServiceJobRestartPolicy in @@ -856,21 +791,17 @@ func (t *Task) Canonicalize(tg *TaskGroup, job *Job) { // TaskArtifact is used to download artifacts before running a task. 
type TaskArtifact struct { - GetterSource *string `mapstructure:"source" hcl:"source,optional"` - GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` - GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"` - GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` - GetterInsecure *bool `mapstructure:"insecure" hcl:"insecure,optional"` - RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` + GetterSource *string `mapstructure:"source" hcl:"source,optional"` + GetterOptions map[string]string `mapstructure:"options" hcl:"options,block"` + GetterHeaders map[string]string `mapstructure:"headers" hcl:"headers,block"` + GetterMode *string `mapstructure:"mode" hcl:"mode,optional"` + RelativeDest *string `mapstructure:"destination" hcl:"destination,optional"` } func (a *TaskArtifact) Canonicalize() { if a.GetterMode == nil { a.GetterMode = pointerOf("any") } - if a.GetterInsecure == nil { - a.GetterInsecure = pointerOf(false) - } if a.GetterSource == nil { // Shouldn't be possible, but we don't want to panic a.GetterSource = pointerOf("") diff --git a/api/tasks_test.go b/api/tasks_test.go index e98d6ef1d1c7..78a6cbf618a4 100644 --- a/api/tasks_test.go +++ b/api/tasks_test.go @@ -317,7 +317,6 @@ func TestTask_Artifact(t *testing.T) { } a.Canonicalize() must.Eq(t, "file", *a.GetterMode) - must.Eq(t, false, *a.GetterInsecure) must.Eq(t, "local/foo.txt", filepath.ToSlash(*a.RelativeDest)) must.Nil(t, a.GetterOptions) must.Nil(t, a.GetterHeaders) diff --git a/client/agent_endpoint.go b/client/agent_endpoint.go index 4d676bc3129f..0e4d1d322ccd 100644 --- a/client/agent_endpoint.go +++ b/client/agent_endpoint.go @@ -10,7 +10,7 @@ import ( "io" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/command/agent/host" "github.com/hashicorp/nomad/command/agent/monitor" diff --git a/client/agent_endpoint_test.go b/client/agent_endpoint_test.go index 7c02691e5a0e..4caad50da6dd 100644 --- a/client/agent_endpoint_test.go +++ b/client/agent_endpoint_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" diff --git a/client/alloc_endpoint.go b/client/alloc_endpoint.go index 1d0136ad4347..d063fd54dff9 100644 --- a/client/alloc_endpoint.go +++ b/client/alloc_endpoint.go @@ -13,14 +13,13 @@ import ( "time" "github.com/armon/go-metrics" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" nstructs "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" ) // Allocations endpoint is used for interacting with client allocations @@ -287,7 +286,7 @@ func (a *Allocations) execImpl(encoder *codec.Encoder, decoder *codec.Decoder, e } // check node access - if capabilities.FSIsolation == fsisolation.None { + if capabilities.FSIsolation == drivers.FSIsolationNone { exec := aclObj.AllowNsOp(alloc.Namespace, acl.NamespaceCapabilityAllocNodeExec) if !exec { return nil, nstructs.ErrPermissionDenied diff --git a/client/alloc_endpoint_test.go b/client/alloc_endpoint_test.go index 6e651c63bc7c..13ed9b947a5f 100644 
--- a/client/alloc_endpoint_test.go +++ b/client/alloc_endpoint_test.go @@ -13,7 +13,7 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/config" diff --git a/client/allocdir/alloc_dir.go b/client/allocdir/alloc_dir.go index fd4d60610b85..d5cf56587517 100644 --- a/client/allocdir/alloc_dir.go +++ b/client/allocdir/alloc_dir.go @@ -28,18 +28,6 @@ const ( // idUnsupported is what the uid/gid will be set to on platforms (eg // Windows) that don't support integer ownership identifiers. idUnsupported = -1 - - // fileMode777 is a constant that represents the file mode rwxrwxrwx - fileMode777 = os.FileMode(0o777) - - // fileMode710 is a constant that represents the file mode rwx--x--- - fileMode710 = os.FileMode(0o710) - - // fileMode755 is a constant that represents the file mode rwxr-xr-x - fileMode755 = os.FileMode(0o755) - - // fileMode666 is a constant that represents the file mode rw-rw-rw- - fileMode666 = os.FileMode(0o666) ) var ( @@ -77,7 +65,7 @@ var ( TaskPrivate = "private" // TaskDirs is the set of directories created in each tasks directory. - TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | fileMode777} + TaskDirs = map[string]os.FileMode{TmpDirName: os.ModeSticky | 0777} // AllocGRPCSocket is the path relative to the task dir root for the // unix socket connected to Consul's gRPC endpoint. @@ -88,19 +76,6 @@ var ( AllocHTTPSocket = filepath.Join(SharedAllocName, TmpDirName, "consul_http.sock") ) -// Interface is implemented by AllocDir. -type Interface interface { - AllocDirFS - - NewTaskDir(string) *TaskDir - AllocDirPath() string - ShareDirPath() string - GetTaskDir(string) *TaskDir - Build() error - Destroy() error - Move(Interface, []*structs.Task) error -} - // AllocDir allows creating, destroying, and accessing an allocation's // directory. All methods are safe for concurrent use. type AllocDir struct { @@ -119,10 +94,6 @@ type AllocDir struct { // be excluded from chroots and is configured via client.alloc_dir. clientAllocDir string - // clientAllocMountsDir is the client agent's mounts directory. It must be - // excluded from chroots and is configured via client.mounts_dir. - clientAllocMountsDir string - // built is true if Build has successfully run built bool @@ -131,20 +102,6 @@ type AllocDir struct { logger hclog.Logger } -func (a *AllocDir) AllocDirPath() string { - return a.AllocDir -} - -func (a *AllocDir) ShareDirPath() string { - return a.SharedDir -} - -func (a *AllocDir) GetTaskDir(task string) *TaskDir { - a.mu.RLock() - defer a.mu.RUnlock() - return a.TaskDirs[task] -} - // AllocDirFS exposes file operations on the alloc dir type AllocDirFS interface { List(path string) ([]*cstructs.AllocFileInfo, error) @@ -157,18 +114,15 @@ type AllocDirFS interface { // NewAllocDir initializes the AllocDir struct with allocDir as base path for // the allocation directory. 
-func NewAllocDir(logger hclog.Logger, clientAllocDir, clientMountsDir, allocID string) *AllocDir { +func NewAllocDir(logger hclog.Logger, clientAllocDir, allocID string) *AllocDir { logger = logger.Named("alloc_dir") allocDir := filepath.Join(clientAllocDir, allocID) - shareDir := filepath.Join(allocDir, SharedAllocName) - return &AllocDir{ - clientAllocDir: clientAllocDir, - clientAllocMountsDir: clientMountsDir, - AllocDir: allocDir, - SharedDir: shareDir, - TaskDirs: make(map[string]*TaskDir), - logger: logger, + clientAllocDir: clientAllocDir, + AllocDir: allocDir, + SharedDir: filepath.Join(allocDir, SharedAllocName), + TaskDirs: make(map[string]*TaskDir), + logger: logger, } } @@ -177,7 +131,7 @@ func (d *AllocDir) NewTaskDir(name string) *TaskDir { d.mu.Lock() defer d.mu.Unlock() - td := d.newTaskDir(name) + td := newTaskDir(d.logger, d.clientAllocDir, d.AllocDir, name) d.TaskDirs[name] = td return td } @@ -222,7 +176,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error { } hdr, err := tar.FileInfoHeader(fileInfo, link) if err != nil { - return fmt.Errorf("error creating file header: %w", err) + return fmt.Errorf("error creating file header: %v", err) } hdr.Name = relPath if err := tw.WriteHeader(hdr); err != nil { @@ -260,7 +214,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error { // anyway. d.logger.Warn("snapshotting failed and unable to write error marker", "error", writeErr) } - return fmt.Errorf("failed to snapshot %s: %w", path, err) + return fmt.Errorf("failed to snapshot %s: %v", path, err) } } @@ -268,7 +222,7 @@ func (d *AllocDir) Snapshot(w io.Writer) error { } // Move other alloc directory's shared path and local dir to this alloc dir. -func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { +func (d *AllocDir) Move(other *AllocDir, tasks []*structs.Task) error { d.mu.RLock() if !d.built { // Enforce the invariant that Build is called before Move @@ -280,31 +234,31 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { d.mu.RUnlock() // Move the data directory - otherDataDir := filepath.Join(other.ShareDirPath(), SharedDataDir) + otherDataDir := filepath.Join(other.SharedDir, SharedDataDir) dataDir := filepath.Join(d.SharedDir, SharedDataDir) if fileInfo, err := os.Stat(otherDataDir); fileInfo != nil && err == nil { os.Remove(dataDir) // remove an empty data dir if it exists if err := os.Rename(otherDataDir, dataDir); err != nil { - return fmt.Errorf("error moving data dir: %w", err) + return fmt.Errorf("error moving data dir: %v", err) } } // Move the task directories for _, task := range tasks { - otherTaskDir := filepath.Join(other.AllocDirPath(), task.Name) + otherTaskDir := filepath.Join(other.AllocDir, task.Name) otherTaskLocal := filepath.Join(otherTaskDir, TaskLocal) fileInfo, err := os.Stat(otherTaskLocal) if fileInfo != nil && err == nil { // TaskDirs haven't been built yet, so create it newTaskDir := filepath.Join(d.AllocDir, task.Name) - if err := os.MkdirAll(newTaskDir, fileMode777); err != nil { - return fmt.Errorf("error creating task %q dir: %w", task.Name, err) + if err := os.MkdirAll(newTaskDir, 0777); err != nil { + return fmt.Errorf("error creating task %q dir: %v", task.Name, err) } localDir := filepath.Join(newTaskDir, TaskLocal) os.Remove(localDir) // remove an empty local dir if it exists if err := os.Rename(otherTaskLocal, localDir); err != nil { - return fmt.Errorf("error moving task %q local dir: %w", task.Name, err) + return fmt.Errorf("error moving task %q local dir: %v", task.Name, err) } } } @@ -315,13 
+269,13 @@ func (d *AllocDir) Move(other Interface, tasks []*structs.Task) error { // Destroy tears down previously build directory structure. func (d *AllocDir) Destroy() error { // Unmount all mounted shared alloc dirs. - mErr := new(multierror.Error) + var mErr multierror.Error if err := d.UnmountAll(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } if err := os.RemoveAll(d.AllocDir); err != nil { - mErr = multierror.Append(mErr, fmt.Errorf("failed to remove alloc dir %q: %w", d.AllocDir, err)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("failed to remove alloc dir %q: %v", d.AllocDir, err)) } // Unset built since the alloc dir has been destroyed. @@ -336,10 +290,36 @@ func (d *AllocDir) UnmountAll() error { d.mu.RLock() defer d.mu.RUnlock() - mErr := new(multierror.Error) + var mErr multierror.Error for _, dir := range d.TaskDirs { - if err := dir.Unmount(); err != nil { - mErr = multierror.Append(mErr, err) + // Check if the directory has the shared alloc mounted. + if pathExists(dir.SharedTaskDir) { + if err := unlinkDir(dir.SharedTaskDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to unmount shared alloc dir %q: %v", dir.SharedTaskDir, err)) + } else if err := os.RemoveAll(dir.SharedTaskDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to delete shared alloc dir %q: %v", dir.SharedTaskDir, err)) + } + } + + if pathExists(dir.SecretsDir) { + if err := removeSecretDir(dir.SecretsDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to remove the secret dir %q: %v", dir.SecretsDir, err)) + } + } + + if pathExists(dir.PrivateDir) { + if err := removeSecretDir(dir.PrivateDir); err != nil { + mErr.Errors = append(mErr.Errors, + fmt.Errorf("failed to remove the private dir %q: %v", dir.PrivateDir, err)) + } + } + + // Unmount dev/ and proc/ have been mounted. + if err := dir.unmountSpecialDirs(); err != nil { + mErr.Errors = append(mErr.Errors, err) } } @@ -349,19 +329,27 @@ func (d *AllocDir) UnmountAll() error { // Build the directory tree for an allocation. func (d *AllocDir) Build() error { // Make the alloc directory, owned by the nomad process. - if err := os.MkdirAll(d.AllocDir, fileMode755); err != nil { - return fmt.Errorf("Failed to make the alloc directory %v: %w", d.AllocDir, err) + if err := os.MkdirAll(d.AllocDir, 0755); err != nil { + return fmt.Errorf("Failed to make the alloc directory %v: %v", d.AllocDir, err) } // Make the shared directory and make it available to all user/groups. - if err := allocMkdirAll(d.SharedDir, fileMode755); err != nil { + if err := os.MkdirAll(d.SharedDir, 0777); err != nil { + return err + } + + // Make the shared directory have non-root permissions. 
+ if err := dropDirPermissions(d.SharedDir, os.ModePerm); err != nil { return err } // Create shared subdirs for _, dir := range SharedAllocDirs { p := filepath.Join(d.SharedDir, dir) - if err := allocMkdirAll(p, fileMode777); err != nil { + if err := os.MkdirAll(p, 0777); err != nil { + return err + } + if err := dropDirPermissions(p, os.ModePerm); err != nil { return err } } @@ -376,7 +364,7 @@ func (d *AllocDir) Build() error { // List returns the list of files at a path relative to the alloc dir func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { - return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } @@ -406,7 +394,7 @@ func (d *AllocDir) List(path string) ([]*cstructs.AllocFileInfo, error) { // Stat returns information about the file at a path relative to the alloc dir func (d *AllocDir) Stat(path string) (*cstructs.AllocFileInfo, error) { if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { - return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } @@ -456,7 +444,7 @@ func detectContentType(fileInfo os.FileInfo, path string) string { // ReadAt returns a reader for a file at the path relative to the alloc dir func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { - return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } @@ -482,7 +470,7 @@ func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { return nil, err } if _, err := f.Seek(offset, 0); err != nil { - return nil, fmt.Errorf("can't seek to offset %q: %w", offset, err) + return nil, fmt.Errorf("can't seek to offset %q: %v", offset, err) } return f, nil } @@ -491,7 +479,7 @@ func (d *AllocDir) ReadAt(path string, offset int64) (io.ReadCloser, error) { // directory exists. The block can be cancelled with the passed context. func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan error, error) { if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { - return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } @@ -517,7 +505,7 @@ func (d *AllocDir) BlockUntilExists(ctx context.Context, path string) (chan erro // used to clean up the watch. 
func (d *AllocDir) ChangeEvents(ctx context.Context, path string, curOffset int64) (*watch.FileChanges, error) { if escapes, err := escapingfs.PathEscapesAllocDir(d.AllocDir, "", path); err != nil { - return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %w", err) + return nil, fmt.Errorf("Failed to check if path escapes alloc directory: %v", err) } else if escapes { return nil, fmt.Errorf("Path escapes the alloc directory") } @@ -545,23 +533,23 @@ func fileCopy(src, dst string, uid, gid int, perm os.FileMode) error { // Do a simple copy. srcFile, err := os.Open(src) if err != nil { - return fmt.Errorf("Couldn't open src file %v: %w", src, err) + return fmt.Errorf("Couldn't open src file %v: %v", src, err) } defer srcFile.Close() dstFile, err := os.OpenFile(dst, os.O_WRONLY|os.O_CREATE, perm) if err != nil { - return fmt.Errorf("Couldn't create destination file %v: %w", dst, err) + return fmt.Errorf("Couldn't create destination file %v: %v", dst, err) } defer dstFile.Close() if _, err := io.Copy(dstFile, srcFile); err != nil { - return fmt.Errorf("Couldn't copy %q to %q: %w", src, dst, err) + return fmt.Errorf("Couldn't copy %q to %q: %v", src, dst, err) } if uid != idUnsupported && gid != idUnsupported { if err := dstFile.Chown(uid, gid); err != nil { - return fmt.Errorf("Couldn't copy %q to %q: %w", src, dst, err) + return fmt.Errorf("Couldn't copy %q to %q: %v", src, dst, err) } } @@ -641,7 +629,7 @@ func splitPath(path string) ([]fileInfo, error) { // flexible permission. uid, gid := idUnsupported, idUnsupported if err != nil { - mode = fileMode777 + mode = os.ModePerm } else { uid, gid = getOwner(fi) mode = fi.Mode() @@ -660,7 +648,7 @@ func splitPath(path string) ([]fileInfo, error) { uid, gid := idUnsupported, idUnsupported fi, err := os.Stat(dir) if err != nil { - mode = fileMode777 + mode = os.ModePerm } else { uid, gid = getOwner(fi) mode = fi.Mode() @@ -683,7 +671,7 @@ func writeError(tw *tar.Writer, allocID string, err error) error { contents := []byte(fmt.Sprintf("Error snapshotting: %v", err)) hdr := tar.Header{ Name: SnapshotErrorFilename(allocID), - Mode: int64(fileMode666), + Mode: 0666, Size: int64(len(contents)), AccessTime: SnapshotErrorTime, ChangeTime: SnapshotErrorTime, @@ -698,32 +686,3 @@ func writeError(tw *tar.Writer, allocID string, err error) error { _, err = tw.Write(contents) return err } - -// allocMkdirAll creates a directory and sets the permissions to the passed -// value. It also sets the owner of the directory to "nobody" on systems that -// allow. -func allocMkdirAll(path string, perms os.FileMode) error { - // Create the directory - if err := os.MkdirAll(path, perms); err != nil { - return err - } - // Update the access permissions on the directory - if err := dropDirPermissions(path, perms); err != nil { - return err - } - return nil -} - -// allocMakeSecretsDir creates a directory for sensitive items such as secrets. -// When possible it uses a tmpfs or some other method to prevent it from -// persisting to actual disk. 
-func allocMakeSecretsDir(path string, perms os.FileMode) error { - // Create the private directory - if err := createSecretDir(path); err != nil { - return err - } - if err := dropDirPermissions(path, perms); err != nil { - return err - } - return nil -} diff --git a/client/allocdir/alloc_dir_test.go b/client/allocdir/alloc_dir_test.go index 5f7e96fcb52c..e2e1ee8e88c6 100644 --- a/client/allocdir/alloc_dir_test.go +++ b/client/allocdir/alloc_dir_test.go @@ -19,25 +19,9 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" - "github.com/shoenig/test/must" - "golang.org/x/sys/unix" + "github.com/stretchr/testify/require" ) -// copy from testutil to avoid import cycle -func requireNonRoot(t *testing.T) { - if unix.Geteuid() == 0 { - t.Skip("must run as non-root") - } -} - -// copy from testutil to avoid import cycle -func requireRoot(t *testing.T) { - if unix.Geteuid() != 0 { - t.Skip("must run as root") - } -} - var ( t1 = &structs.Task{ Name: "web", @@ -70,11 +54,13 @@ func TestAllocDir_BuildAlloc(t *testing.T) { tmp := t.TempDir() - d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test") + d := NewAllocDir(testlog.HCLogger(t), tmp, "test") defer d.Destroy() d.NewTaskDir(t1.Name) d.NewTaskDir(t2.Name) - must.NoError(t, d.Build()) + if err := d.Build(); err != nil { + t.Fatalf("Build() failed: %v", err) + } // Check that the AllocDir and each of the task directories exist. if _, err := os.Stat(d.AllocDir); os.IsNotExist(err) { @@ -83,13 +69,17 @@ func TestAllocDir_BuildAlloc(t *testing.T) { for _, task := range []*structs.Task{t1, t2} { tDir, ok := d.TaskDirs[task.Name] - must.True(t, ok) + if !ok { + t.Fatalf("Task directory not found for %v", task.Name) + } - stat, _ := os.Stat(tDir.Dir) - must.Nil(t, stat) + if stat, _ := os.Stat(tDir.Dir); stat != nil { + t.Fatalf("Build() created TaskDir %v", tDir.Dir) + } - stat, _ = os.Stat(tDir.SecretsDir) - must.Nil(t, stat) + if stat, _ := os.Stat(tDir.SecretsDir); stat != nil { + t.Fatalf("Build() created secret dir %v", tDir.Dir) + } } } @@ -113,21 +103,28 @@ func TestAllocDir_MountSharedAlloc(t *testing.T) { tmp := t.TempDir() - d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test") + d := NewAllocDir(testlog.HCLogger(t), tmp, "test") defer d.Destroy() - must.NoError(t, d.Build()) + if err := d.Build(); err != nil { + t.Fatalf("Build() failed: %v", err) + } // Build 2 task dirs td1 := d.NewTaskDir(t1.Name) - must.NoError(t, td1.Build(fsisolation.Chroot, nil, "nobody")) - + if err := td1.Build(true, nil); err != nil { + t.Fatalf("error build task=%q dir: %v", t1.Name, err) + } td2 := d.NewTaskDir(t2.Name) - must.NoError(t, td2.Build(fsisolation.Chroot, nil, "nobody")) + if err := td2.Build(true, nil); err != nil { + t.Fatalf("error build task=%q dir: %v", t2.Name, err) + } // Write a file to the shared dir. 
 	contents := []byte("foo")
 	const filename = "bar"
-	must.NoError(t, os.WriteFile(filepath.Join(d.SharedDir, filename), contents, 0o666))
+	if err := os.WriteFile(filepath.Join(d.SharedDir, filename), contents, 0666); err != nil {
+		t.Fatalf("Couldn't write file to shared directory: %v", err)
+	}
 
 	// Check that the file exists in the task directories
 	for _, td := range []*TaskDir{td1, td2} {
@@ -149,37 +146,52 @@ func TestAllocDir_Snapshot(t *testing.T) {
 
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
 	defer d.Destroy()
-	must.NoError(t, d.Build())
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 
 	// Build 2 task dirs
 	td1 := d.NewTaskDir(t1.Name)
-	must.NoError(t, td1.Build(fsisolation.None, nil, "nobody"))
-
+	if err := td1.Build(false, nil); err != nil {
+		t.Fatalf("error building task=%q dir: %v", t1.Name, err)
+	}
 	td2 := d.NewTaskDir(t2.Name)
-	must.NoError(t, td2.Build(fsisolation.None, nil, "nobody"))
+	if err := td2.Build(false, nil); err != nil {
+		t.Fatalf("error building task=%q dir: %v", t2.Name, err)
+	}
 
 	// Write a file to the shared dir.
 	exp := []byte{'f', 'o', 'o'}
 	file := "bar"
-	must.NoError(t, os.WriteFile(filepath.Join(d.SharedDir, "data", file), exp, 0o666))
+	if err := os.WriteFile(filepath.Join(d.SharedDir, "data", file), exp, 0666); err != nil {
+		t.Fatalf("Couldn't write file to shared directory: %v", err)
+	}
 
 	// Write a symlink to the shared dir
 	link := "qux"
-	must.NoError(t, os.Symlink("foo", filepath.Join(d.SharedDir, "data", link)))
+	if err := os.Symlink("foo", filepath.Join(d.SharedDir, "data", link)); err != nil {
+		t.Fatalf("Couldn't write symlink to shared directory: %v", err)
+	}
 
 	// Write a file to the task local
 	exp = []byte{'b', 'a', 'r'}
 	file1 := "lol"
-	must.NoError(t, os.WriteFile(filepath.Join(td1.LocalDir, file1), exp, 0o666))
+	if err := os.WriteFile(filepath.Join(td1.LocalDir, file1), exp, 0666); err != nil {
+		t.Fatalf("couldn't write file to task local directory: %v", err)
+	}
 
 	// Write a symlink to the task local
 	link1 := "baz"
-	must.NoError(t, os.Symlink("bar", filepath.Join(td1.LocalDir, link1)))
+	if err := os.Symlink("bar", filepath.Join(td1.LocalDir, link1)); err != nil {
+		t.Fatalf("couldn't write symlink to task local directory: %v", err)
+	}
 
 	var b bytes.Buffer
-	must.NoError(t, d.Snapshot(&b))
+	if err := d.Snapshot(&b); err != nil {
+		t.Fatalf("err: %v", err)
+	}
 
 	tr := tar.NewReader(&b)
 	var files []string
@@ -199,8 +211,12 @@ func TestAllocDir_Snapshot(t *testing.T) {
 		}
 	}
 
-	must.SliceLen(t, 2, files)
-	must.SliceLen(t, 2, links)
+	if len(files) != 2 {
+		t.Fatalf("bad files: %#v", files)
+	}
+	if len(links) != 2 {
+		t.Fatalf("bad links: %#v", links)
+	}
 }
 
 func TestAllocDir_Move(t *testing.T) {
@@ -210,16 +226,22 @@ func TestAllocDir_Move(t *testing.T) {
 	tmp2 := t.TempDir()
 
 	// Create two alloc dirs
-	d1 := NewAllocDir(testlog.HCLogger(t), tmp1, tmp1, "test")
-	must.NoError(t, d1.Build())
+	d1 := NewAllocDir(testlog.HCLogger(t), tmp1, "test")
+	if err := d1.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 	defer d1.Destroy()
 
-	d2 := NewAllocDir(testlog.HCLogger(t), tmp2, tmp2, "test")
-	must.NoError(t, d2.Build())
+	d2 := NewAllocDir(testlog.HCLogger(t), tmp2, "test")
+	if err := d2.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 	defer d2.Destroy()
 
 	td1 := d1.NewTaskDir(t1.Name)
-	must.NoError(t, td1.Build(fsisolation.None, nil, "nobody"))
+	if err := td1.Build(false, nil); err != nil {
+		t.Fatalf("TaskDir.Build() failed: %v", err)
+	}
 
 	// Create but don't build second task dir to mimic alloc/task runner
 	// behavior (AllocDir.Move() is called pre-TaskDir.Build).
@@ -230,24 +252,32 @@ func TestAllocDir_Move(t *testing.T) {
 	// Write a file to the shared dir.
 	exp1 := []byte("foo")
 	file1 := "bar"
-	must.NoError(t, os.WriteFile(filepath.Join(dataDir, file1), exp1, 0o666))
+	if err := os.WriteFile(filepath.Join(dataDir, file1), exp1, 0666); err != nil {
+		t.Fatalf("Couldn't write file to shared directory: %v", err)
+	}
 
 	// Write a file to the task local
 	exp2 := []byte("bar")
 	file2 := "lol"
-	must.NoError(t, os.WriteFile(filepath.Join(td1.LocalDir, file2), exp2, 0o666))
+	if err := os.WriteFile(filepath.Join(td1.LocalDir, file2), exp2, 0666); err != nil {
+		t.Fatalf("couldn't write to task local directory: %v", err)
+	}
 
 	// Move the d1 allocdir to d2
-	must.NoError(t, d2.Move(d1, []*structs.Task{t1}))
+	if err := d2.Move(d1, []*structs.Task{t1}); err != nil {
+		t.Fatalf("err: %v", err)
+	}
 
 	// Ensure the files in d1 are present in d2
 	fi, err := os.Stat(filepath.Join(d2.SharedDir, SharedDataDir, file1))
-	must.NoError(t, err)
-	must.NotNil(t, fi)
+	if err != nil || fi == nil {
+		t.Fatalf("data dir was not moved")
+	}
 
 	fi, err = os.Stat(filepath.Join(d2.TaskDirs[t1.Name].LocalDir, file2))
-	must.NoError(t, err)
-	must.NotNil(t, fi)
+	if err != nil || fi == nil {
+		t.Fatalf("task local dir was not moved")
+	}
 }
 
 func TestAllocDir_EscapeChecking(t *testing.T) {
@@ -255,8 +285,10 @@ func TestAllocDir_EscapeChecking(t *testing.T) {
 
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
-	must.NoError(t, d.Build())
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 	defer d.Destroy()
 
 	// Check that issuing calls that escape the alloc dir returns errors
@@ -291,23 +323,28 @@ func TestAllocDir_ReadAt_SecretDir(t *testing.T) {
 	ci.Parallel(t)
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
-	must.NoError(t, d.Build())
-	defer func() { _ = d.Destroy() }()
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
+	err := d.Build()
+	require.NoError(t, err)
+	defer func() {
+		_ = d.Destroy()
+	}()
 
 	td := d.NewTaskDir(t1.Name)
-	must.NoError(t, td.Build(fsisolation.None, nil, "nobody"))
+	err = td.Build(false, nil)
+	require.NoError(t, err)
 
 	// something to write and test reading
 	target := filepath.Join(t1.Name, TaskSecrets, "test_file")
 
 	// create target file in the task secrets dir
 	full := filepath.Join(d.AllocDir, target)
-	must.NoError(t, os.WriteFile(full, []byte("hi"), 0o600))
+	err = os.WriteFile(full, []byte("hi"), 0600)
+	require.NoError(t, err)
 
 	// ReadAt of a file in the task secrets dir should fail
-	_, err := d.ReadAt(target, 0)
-	must.EqError(t, err, "Reading secret file prohibited: web/secrets/test_file")
+	_, err = d.ReadAt(target, 0)
+	require.EqualError(t, err, "Reading secret file prohibited: web/secrets/test_file")
 }
 
 func TestAllocDir_SplitPath(t *testing.T) {
@@ -316,39 +353,54 @@ func TestAllocDir_SplitPath(t *testing.T) {
 	dir := t.TempDir()
 	dest := filepath.Join(dir, "/foo/bar/baz")
-	must.NoError(t, os.MkdirAll(dest, os.ModePerm))
+	if err := os.MkdirAll(dest, os.ModePerm); err != nil {
+		t.Fatalf("err: %v", err)
+	}
 
 	info, err := splitPath(dest)
-	must.NoError(t, err)
-
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
 	// Testing that is 6 or more rather than 6 because on osx, the temp dir is
 	// randomized.
-	must.GreaterEq(t, 6, len(info))
+	if len(info) < 6 {
+		t.Fatalf("expected at least: %v, actual: %v", 6, len(info))
+	}
 }
 
 func TestAllocDir_CreateDir(t *testing.T) {
-	requireRoot(t)
-
 	ci.Parallel(t)
+	if syscall.Geteuid() != 0 {
+		t.Skip("Must be root to run test")
+	}
 
 	dir := t.TempDir()
 
 	// create a subdir and a file
 	subdir := filepath.Join(dir, "subdir")
-	must.NoError(t, os.MkdirAll(subdir, 0o760))
-
+	if err := os.MkdirAll(subdir, 0760); err != nil {
+		t.Fatalf("err: %v", err)
+	}
 	subdirMode, err := os.Stat(subdir)
-	must.NoError(t, err)
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
 
 	// Create the above hierarchy under another destination
 	dir1 := t.TempDir()
-	must.NoError(t, createDir(dir1, subdir))
+	if err := createDir(dir1, subdir); err != nil {
+		t.Fatalf("err: %v", err)
+	}
 
 	// Ensure that the subdir had the right perm
 	fi, err := os.Stat(filepath.Join(dir1, dir, "subdir"))
-	must.NoError(t, err)
-	must.Eq(t, fi.Mode(), subdirMode.Mode())
+	if err != nil {
+		t.Fatalf("err: %v", err)
+	}
+	if fi.Mode() != subdirMode.Mode() {
+		t.Fatalf("wrong file mode: %v, expected: %v", fi.Mode(), subdirMode.Mode())
+	}
 }
 
 func TestPathFuncs(t *testing.T) {
@@ -358,8 +410,12 @@ func TestPathFuncs(t *testing.T) {
 
 	missingDir := filepath.Join(dir, "does-not-exist")
 
-	must.True(t, pathExists(dir))
-	must.False(t, pathExists(missingDir))
+	if !pathExists(dir) {
+		t.Errorf("expected %q to exist", dir)
+	}
+	if pathExists(missingDir) {
+		t.Errorf("expected %q to not exist", missingDir)
+	}
 
 	if empty, err := pathEmpty(dir); err != nil || !empty {
 		t.Errorf("%q is empty and exists. empty=%v error=%v", dir, empty, err)
 	}
@@ -370,7 +426,9 @@
 	filename := filepath.Join(dir, "just-some-file")
 	f, err := os.Create(filename)
-	must.NoError(t, err)
+	if err != nil {
+		t.Fatalf("could not create %q: %v", filename, err)
+	}
 	f.Close()
 
 	if empty, err := pathEmpty(dir); err != nil || empty {
@@ -380,6 +438,7 @@
 func TestAllocDir_DetectContentType(t *testing.T) {
 	ci.Parallel(t)
+	require := require.New(t)
 	inputPath := "input/"
 	var testFiles []string
@@ -389,7 +448,7 @@
 		}
 		return err
 	})
-	must.NoError(t, err)
+	require.Nil(err)
 
 	expectedEncodings := map[string]string{
 		"input/happy.gif": "image/gif",
@@ -403,9 +462,9 @@
 	}
 	for _, file := range testFiles {
 		fileInfo, err := os.Stat(file)
-		must.NoError(t, err)
+		require.Nil(err)
 		res := detectContentType(fileInfo, file)
-		must.Eq(t, expectedEncodings[file], res)
+		require.Equal(expectedEncodings[file], res, "unexpected output for %v", file)
 	}
 }
@@ -423,11 +482,10 @@ func TestAllocDir_SkipAllocDir(t *testing.T) {
 
 	rootDir := t.TempDir()
 	clientAllocDir := filepath.Join(rootDir, "nomad")
-	mountAllocDir := filepath.Join(rootDir, "mounts")
-	must.NoError(t, os.Mkdir(clientAllocDir, fs.ModeDir|0o777))
+	require.NoError(t, os.Mkdir(clientAllocDir, fs.ModeDir|0o777))
 
 	otherDir := filepath.Join(rootDir, "etc")
-	must.NoError(t, os.Mkdir(otherDir, fs.ModeDir|0o777))
+	require.NoError(t, os.Mkdir(otherDir, fs.ModeDir|0o777))
 
 	// chroot contains client.alloc_dir! This could cause infinite
 	// recursion.
@@ -435,15 +493,15 @@ func TestAllocDir_SkipAllocDir(t *testing.T) { rootDir: "/", } - allocDir := NewAllocDir(testlog.HCLogger(t), clientAllocDir, mountAllocDir, "test") + allocDir := NewAllocDir(testlog.HCLogger(t), clientAllocDir, "test") taskDir := allocDir.NewTaskDir("testtask") - must.NoError(t, allocDir.Build()) + require.NoError(t, allocDir.Build()) defer allocDir.Destroy() // Build chroot - err := taskDir.Build(fsisolation.Chroot, chroot, "nobody") - must.NoError(t, err) + err := taskDir.Build(true, chroot) + require.NoError(t, err) // Assert other directory *was* embedded embeddedOtherDir := filepath.Join(clientAllocDir, "test", "testtask", "etc") diff --git a/client/allocdir/fs_darwin.go b/client/allocdir/fs_darwin.go index edd8c8958537..c749c3cade13 100644 --- a/client/allocdir/fs_darwin.go +++ b/client/allocdir/fs_darwin.go @@ -20,7 +20,7 @@ func unlinkDir(dir string) error { // createSecretDir creates the secrets dir folder at the given path func createSecretDir(dir string) error { - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // removeSecretDir removes the secrets dir folder diff --git a/client/allocdir/fs_default.go b/client/allocdir/fs_default.go deleted file mode 100644 index 4f36ef3b7392..000000000000 --- a/client/allocdir/fs_default.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -//go:build !linux - -package allocdir - -import "os" - -// mountDir bind mounts old to next using the given file mode. -func mountDir(old, next string, uid, gid int, mode os.FileMode) error { - panic("not implemented") -} diff --git a/client/allocdir/fs_freebsd.go b/client/allocdir/fs_freebsd.go index edd8c8958537..c749c3cade13 100644 --- a/client/allocdir/fs_freebsd.go +++ b/client/allocdir/fs_freebsd.go @@ -20,7 +20,7 @@ func unlinkDir(dir string) error { // createSecretDir creates the secrets dir folder at the given path func createSecretDir(dir string) error { - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // removeSecretDir removes the secrets dir folder diff --git a/client/allocdir/fs_linux.go b/client/allocdir/fs_linux.go index cbf7b993af72..798c1edc711a 100644 --- a/client/allocdir/fs_linux.go +++ b/client/allocdir/fs_linux.go @@ -1,8 +1,6 @@ // Copyright (c) HashiCorp, Inc. // SPDX-License-Identifier: BUSL-1.1 -//go:build linux - package allocdir import ( @@ -26,25 +24,13 @@ const ( // linkDir bind mounts src to dst as Linux doesn't support hardlinking // directories. func linkDir(src, dst string) error { - if err := os.MkdirAll(dst, fileMode777); err != nil { + if err := os.MkdirAll(dst, 0777); err != nil { return err } return syscall.Mount(src, dst, "", syscall.MS_BIND, "") } -// mountDir bind mounts old to next using the given file mode. -func mountDir(old, next string, uid, gid int, mode os.FileMode) error { - if err := os.MkdirAll(next, mode); err != nil { - return err - } - opts := unix.MS_BIND | unix.MS_NOSUID | unix.MS_NOATIME - if err := unix.Mount(old, next, "", uintptr(opts), ""); err != nil { - return err - } - return os.Chown(next, uid, gid) -} - // unlinkDir unmounts a bind mounted directory as Linux doesn't support // hardlinking directories. If the dir is already unmounted no error is // returned. 
@@ -62,7 +48,7 @@ func unlinkDir(dir string) error { func createSecretDir(dir string) error { // Only mount the tmpfs if we are root if unix.Geteuid() == 0 { - if err := os.MkdirAll(dir, fileMode777); err != nil { + if err := os.MkdirAll(dir, 0777); err != nil { return err } @@ -79,7 +65,7 @@ func createSecretDir(dir string) error { } // Create the marker file so we don't try to mount more than once - f, err := os.OpenFile(marker, os.O_RDWR|os.O_CREATE, fileMode666) + f, err := os.OpenFile(marker, os.O_RDWR|os.O_CREATE, 0666) if err != nil { // Hard fail since if this fails something is really wrong return err @@ -88,7 +74,7 @@ func createSecretDir(dir string) error { return nil } - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // createSecretDir removes the secrets dir folder diff --git a/client/allocdir/fs_netbsd.go b/client/allocdir/fs_netbsd.go index edd8c8958537..c749c3cade13 100644 --- a/client/allocdir/fs_netbsd.go +++ b/client/allocdir/fs_netbsd.go @@ -20,7 +20,7 @@ func unlinkDir(dir string) error { // createSecretDir creates the secrets dir folder at the given path func createSecretDir(dir string) error { - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // removeSecretDir removes the secrets dir folder diff --git a/client/allocdir/fs_solaris.go b/client/allocdir/fs_solaris.go index d35be1f7e5e3..1c35696247b7 100644 --- a/client/allocdir/fs_solaris.go +++ b/client/allocdir/fs_solaris.go @@ -21,7 +21,7 @@ func unlinkDir(dir string) error { // createSecretDir creates the secrets dir folder at the given path func createSecretDir(dir string) error { // TODO solaris has support for tmpfs so use that - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // removeSecretDir removes the secrets dir folder diff --git a/client/allocdir/fs_unix.go b/client/allocdir/fs_unix.go index b393837be0d6..d3ad3b3ce566 100644 --- a/client/allocdir/fs_unix.go +++ b/client/allocdir/fs_unix.go @@ -34,8 +34,8 @@ var ( // dropDirPermissions gives full access to a directory to all users and sets // the owner to nobody. func dropDirPermissions(path string, desired os.FileMode) error { - if err := os.Chmod(path, desired|fileMode777); err != nil { - return fmt.Errorf("Chmod(%v) failed: %w", path, err) + if err := os.Chmod(path, desired|0777); err != nil { + return fmt.Errorf("Chmod(%v) failed: %v", path, err) } // Can't change owner if not root. 
@@ -59,7 +59,7 @@ func dropDirPermissions(path string, desired os.FileMode) error { } if err := os.Chown(path, uid, gid); err != nil { - return fmt.Errorf("Couldn't change owner/group of %v to (uid: %v, gid: %v): %w", path, uid, gid, err) + return fmt.Errorf("Couldn't change owner/group of %v to (uid: %v, gid: %v): %v", path, uid, gid, err) } return nil @@ -69,7 +69,7 @@ func dropDirPermissions(path string, desired os.FileMode) error { func getUid(u *user.User) (int, error) { uid, err := strconv.Atoi(u.Uid) if err != nil { - return 0, fmt.Errorf("Unable to convert Uid to an int: %w", err) + return 0, fmt.Errorf("Unable to convert Uid to an int: %v", err) } return uid, nil @@ -79,7 +79,7 @@ func getUid(u *user.User) (int, error) { func getGid(u *user.User) (int, error) { gid, err := strconv.Atoi(u.Gid) if err != nil { - return 0, fmt.Errorf("Unable to convert Gid to an int: %w", err) + return 0, fmt.Errorf("Unable to convert Gid to an int: %v", err) } return gid, nil diff --git a/client/allocdir/fs_windows.go b/client/allocdir/fs_windows.go index 3e3223810fe7..7794abb36140 100644 --- a/client/allocdir/fs_windows.go +++ b/client/allocdir/fs_windows.go @@ -39,7 +39,7 @@ func unlinkDir(dir string) error { // createSecretDir creates the secrets dir folder at the given path func createSecretDir(dir string) error { - return os.MkdirAll(dir, fileMode777) + return os.MkdirAll(dir, 0777) } // removeSecretDir removes the secrets dir folder diff --git a/client/allocdir/task_dir.go b/client/allocdir/task_dir.go index 644bd6c7aa2e..9e18a26115ee 100644 --- a/client/allocdir/task_dir.go +++ b/client/allocdir/task_dir.go @@ -8,75 +8,47 @@ import ( "os" "path/filepath" - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/go-set/v2" - "github.com/hashicorp/nomad/helper/users/dynamic" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" + hclog "github.com/hashicorp/go-hclog" ) // TaskDir contains all of the paths relevant to a task. All paths are on the // host system so drivers should mount/link into task containers as necessary. type TaskDir struct { - // AllocDir is the path to the alloc directory on the host. - // (not to be conflated with client.alloc_dir) - // - // + // AllocDir is the path to the alloc directory on the host AllocDir string - // Dir is the path to Task directory on the host. - // - // + // Dir is the path to Task directory on the host Dir string - // MountsAllocDir is the path to the alloc directory on the host that has - // been bind mounted under - // - // //alloc -> - MountsAllocDir string - - // MountsTaskDir is the path to the task directory on the host that has been - // bind mounted under - // - // //task -> - MountsTaskDir string - // SharedAllocDir is the path to shared alloc directory on the host - // // /alloc/ SharedAllocDir string // SharedTaskDir is the path to the shared alloc directory linked into // the task directory on the host. - // // /alloc/ SharedTaskDir string // LocalDir is the path to the task's local directory on the host - // // /local/ LocalDir string // LogDir is the path to the task's log directory on the host - // // /alloc/logs/ LogDir string // SecretsDir is the path to secrets/ directory on the host - // // /secrets/ SecretsDir string // PrivateDir is the path to private/ directory on the host - // // /private/ PrivateDir string // skip embedding these paths in chroots. Used for avoiding embedding - // client.alloc_dir and client.mounts_dir recursively. 
- skip *set.Set[string] + // client.alloc_dir recursively. + skip map[string]struct{} - // logger for this task logger hclog.Logger } @@ -84,43 +56,58 @@ type TaskDir struct { // create paths on disk. // // Call AllocDir.NewTaskDir to create new TaskDirs -func (d *AllocDir) newTaskDir(taskName string) *TaskDir { - taskDir := filepath.Join(d.AllocDir, taskName) - taskUnique := filepath.Base(d.AllocDir) + "-" + taskName +func newTaskDir(logger hclog.Logger, clientAllocDir, allocDir, taskName string) *TaskDir { + taskDir := filepath.Join(allocDir, taskName) + + logger = logger.Named("task_dir").With("task_name", taskName) + + // skip embedding client.alloc_dir in chroots + skip := map[string]struct{}{clientAllocDir: {}} return &TaskDir{ - AllocDir: d.AllocDir, + AllocDir: allocDir, Dir: taskDir, - SharedAllocDir: filepath.Join(d.AllocDir, SharedAllocName), - LogDir: filepath.Join(d.AllocDir, SharedAllocName, LogDirName), + SharedAllocDir: filepath.Join(allocDir, SharedAllocName), + LogDir: filepath.Join(allocDir, SharedAllocName, LogDirName), SharedTaskDir: filepath.Join(taskDir, SharedAllocName), LocalDir: filepath.Join(taskDir, TaskLocal), SecretsDir: filepath.Join(taskDir, TaskSecrets), PrivateDir: filepath.Join(taskDir, TaskPrivate), - MountsTaskDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "task"), - MountsAllocDir: filepath.Join(d.clientAllocMountsDir, taskUnique, "alloc"), - skip: set.From[string]([]string{d.clientAllocDir, d.clientAllocMountsDir}), - logger: d.logger.Named("task_dir").With("task_name", taskName), + skip: skip, + logger: logger, } } // Build default directories and permissions in a task directory. chrootCreated // allows skipping chroot creation if the caller knows it has already been // done. client.alloc_dir will be skipped. -func (t *TaskDir) Build(fsi fsisolation.Mode, chroot map[string]string, username string) error { - if err := allocMkdirAll(t.Dir, fileMode777); err != nil { +func (t *TaskDir) Build(createChroot bool, chroot map[string]string) error { + if err := os.MkdirAll(t.Dir, 0777); err != nil { return err } - if err := allocMkdirAll(t.LocalDir, fileMode777); err != nil { + // Make the task directory have non-root permissions. + if err := dropDirPermissions(t.Dir, os.ModePerm); err != nil { + return err + } + + // Create a local directory that each task can use. + if err := os.MkdirAll(t.LocalDir, 0777); err != nil { + return err + } + + if err := dropDirPermissions(t.LocalDir, os.ModePerm); err != nil { return err } // Create the directories that should be in every task. for dir, perms := range TaskDirs { absdir := filepath.Join(t.Dir, dir) + if err := os.MkdirAll(absdir, perms); err != nil { + return err + } - if err := allocMkdirAll(absdir, perms); err != nil { + if err := dropDirPermissions(absdir, perms); err != nil { return err } } @@ -129,54 +116,41 @@ func (t *TaskDir) Build(fsi fsisolation.Mode, chroot map[string]string, username // Image based isolation will bind the shared alloc dir in the driver. // If there's no isolation the task will use the host path to the // shared alloc dir. 
- if fsi == fsisolation.Chroot { + if createChroot { // If the path doesn't exist OR it exists and is empty, link it empty, _ := pathEmpty(t.SharedTaskDir) if !pathExists(t.SharedTaskDir) || empty { if err := linkDir(t.SharedAllocDir, t.SharedTaskDir); err != nil { - return fmt.Errorf("Failed to mount shared directory for task: %w", err) + return fmt.Errorf("Failed to mount shared directory for task: %v", err) } } } // Create the secret directory - if err := allocMakeSecretsDir(t.SecretsDir, fileMode777); err != nil { + if err := createSecretDir(t.SecretsDir); err != nil { + return err + } + + if err := dropDirPermissions(t.SecretsDir, os.ModePerm); err != nil { return err } // Create the private directory - if err := allocMakeSecretsDir(t.PrivateDir, fileMode777); err != nil { + if err := createSecretDir(t.PrivateDir); err != nil { + return err + } + + if err := dropDirPermissions(t.PrivateDir, os.ModePerm); err != nil { return err } // Build chroot if chroot filesystem isolation is going to be used - if fsi == fsisolation.Chroot { + if createChroot { if err := t.buildChroot(chroot); err != nil { return err } } - // Only bind mount the task alloc/task dirs to the client.mounts_dir/ - if fsi == fsisolation.Unveil { - uid, gid, _, err := dynamic.LookupUser(username) - if err != nil { - return fmt.Errorf("Failed to lookup user: %v", err) - } - - // create the task unique directory under the client mounts path - parent := filepath.Dir(t.MountsAllocDir) - if err = os.MkdirAll(parent, fileMode710); err != nil { - return fmt.Errorf("Failed to create task mount directory: %v", err) - } - if err = os.Chown(parent, uid, gid); err != nil { - return fmt.Errorf("Failed to chown task mount directory: %v", err) - } - - // create the task and alloc mount points - mountDir(t.AllocDir, t.MountsAllocDir, uid, gid, fileMode710) - mountDir(t.Dir, t.MountsTaskDir, uid, gid, fileMode710) - } - return nil } @@ -191,7 +165,7 @@ func (t *TaskDir) buildChroot(entries map[string]string) error { func (t *TaskDir) embedDirs(entries map[string]string) error { subdirs := make(map[string]string) for source, dest := range entries { - if t.skip.Contains(source) { + if _, ok := t.skip[source]; ok { // source in skip list continue } @@ -205,7 +179,7 @@ func (t *TaskDir) embedDirs(entries map[string]string) error { // Embedding a single file if !s.IsDir() { if err := createDir(t.Dir, filepath.Dir(dest)); err != nil { - return fmt.Errorf("Couldn't create destination directory %v: %w", dest, err) + return fmt.Errorf("Couldn't create destination directory %v: %v", dest, err) } // Copy the file. @@ -222,19 +196,19 @@ func (t *TaskDir) embedDirs(entries map[string]string) error { destDir := filepath.Join(t.Dir, dest) if err := createDir(t.Dir, dest); err != nil { - return fmt.Errorf("Couldn't create destination directory %v: %w", destDir, err) + return fmt.Errorf("Couldn't create destination directory %v: %v", destDir, err) } // Enumerate the files in source. 
dirEntries, err := os.ReadDir(source) if err != nil { - return fmt.Errorf("Couldn't read directory %v: %w", source, err) + return fmt.Errorf("Couldn't read directory %v: %v", source, err) } for _, fileEntry := range dirEntries { entry, err := fileEntry.Info() if err != nil { - return fmt.Errorf("Couldn't read the file information %v: %w", entry, err) + return fmt.Errorf("Couldn't read the file information %v: %v", entry, err) } hostEntry := filepath.Join(source, entry.Name()) taskEntry := filepath.Join(destDir, filepath.Base(hostEntry)) @@ -257,13 +231,13 @@ func (t *TaskDir) embedDirs(entries map[string]string) error { link, err := os.Readlink(hostEntry) if err != nil { - return fmt.Errorf("Couldn't resolve symlink for %v: %w", source, err) + return fmt.Errorf("Couldn't resolve symlink for %v: %v", source, err) } if err := os.Symlink(link, taskEntry); err != nil { // Symlinking twice if err.(*os.LinkError).Err.Error() != "file exists" { - return fmt.Errorf("Couldn't create symlink: %w", err) + return fmt.Errorf("Couldn't create symlink: %v", err) } } continue @@ -283,55 +257,3 @@ func (t *TaskDir) embedDirs(entries map[string]string) error { return nil } - -// Unmount or delete task directories. Returns all errors as a multierror. -func (t *TaskDir) Unmount() error { - mErr := new(multierror.Error) - - // Check if the directory has the shared alloc mounted. - if pathExists(t.SharedTaskDir) { - if err := unlinkDir(t.SharedTaskDir); err != nil { - mErr = multierror.Append(mErr, - fmt.Errorf("failed to unmount shared alloc dir %q: %w", t.SharedTaskDir, err)) - } else if err := os.RemoveAll(t.SharedTaskDir); err != nil { - mErr = multierror.Append(mErr, - fmt.Errorf("failed to delete shared alloc dir %q: %w", t.SharedTaskDir, err)) - } - } - - if pathExists(t.SecretsDir) { - if err := removeSecretDir(t.SecretsDir); err != nil { - mErr = multierror.Append(mErr, - fmt.Errorf("failed to remove the secret dir %q: %w", t.SecretsDir, err)) - } - } - - if pathExists(t.PrivateDir) { - if err := removeSecretDir(t.PrivateDir); err != nil { - mErr = multierror.Append(mErr, - fmt.Errorf("failed to remove the private dir %q: %w", t.PrivateDir, err)) - } - } - - if pathExists(t.MountsAllocDir) { - if err := unlinkDir(t.MountsAllocDir); err != nil { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("failed to remove the alloc mounts dir %q: %w", t.MountsAllocDir, err), - ) - } - } - - if pathExists(t.MountsTaskDir) { - if err := unlinkDir(t.MountsTaskDir); err != nil { - mErr.Errors = append(mErr.Errors, - fmt.Errorf("failed to remove the alloc mounts task dir %q: %w", t.MountsTaskDir, err), - ) - } - } - - // Unmount dev/ and proc/ have been mounted. - if err := t.unmountSpecialDirs(); err != nil { - mErr = multierror.Append(mErr, err) - } - return mErr.ErrorOrNil() -} diff --git a/client/allocdir/task_dir_linux.go b/client/allocdir/task_dir_linux.go index d11747cd6cda..2cb652aabcba 100644 --- a/client/allocdir/task_dir_linux.go +++ b/client/allocdir/task_dir_linux.go @@ -15,13 +15,13 @@ import ( // error is returned if the directories do not exist or have already been // unmounted. 
 func (t *TaskDir) unmountSpecialDirs() error {
-	mErr := new(multierror.Error)
+	errs := new(multierror.Error)
 	dev := filepath.Join(t.Dir, "dev")
 	if pathExists(dev) {
 		if err := unlinkDir(dev); err != nil {
-			mErr = multierror.Append(mErr, fmt.Errorf("Failed to unmount dev %q: %w", dev, err))
+			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount dev %q: %v", dev, err))
 		} else if err := os.RemoveAll(dev); err != nil {
-			mErr = multierror.Append(mErr, fmt.Errorf("Failed to delete dev directory %q: %w", dev, err))
+			errs = multierror.Append(errs, fmt.Errorf("Failed to delete dev directory %q: %v", dev, err))
 		}
 	}
 
@@ -29,11 +29,11 @@ func (t *TaskDir) unmountSpecialDirs() error {
 	proc := filepath.Join(t.Dir, "proc")
 	if pathExists(proc) {
 		if err := unlinkDir(proc); err != nil {
-			mErr = multierror.Append(mErr, fmt.Errorf("Failed to unmount proc %q: %w", proc, err))
+			errs = multierror.Append(errs, fmt.Errorf("Failed to unmount proc %q: %v", proc, err))
 		} else if err := os.RemoveAll(proc); err != nil {
-			mErr = multierror.Append(mErr, fmt.Errorf("Failed to delete proc directory %q: %w", dev, err))
+			errs = multierror.Append(errs, fmt.Errorf("Failed to delete proc directory %q: %v", proc, err))
 		}
 	}
 
-	return mErr.ErrorOrNil()
+	return errs.ErrorOrNil()
 }
diff --git a/client/allocdir/task_dir_test.go b/client/allocdir/task_dir_test.go
index a92a79e5e0cc..d99b2af90ef4 100644
--- a/client/allocdir/task_dir_test.go
+++ b/client/allocdir/task_dir_test.go
@@ -5,14 +5,11 @@ package allocdir
 
 import (
 	"os"
-	"os/user"
 	"path/filepath"
 	"testing"
 
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/testlog"
-	"github.com/hashicorp/nomad/plugins/drivers/fsisolation"
-	"github.com/shoenig/test/must"
 )
 
 // Test that building a chroot will skip nonexistent directories.
@@ -21,14 +18,18 @@
 
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
 	defer d.Destroy()
 	td := d.NewTaskDir(t1.Name)
-	must.NoError(t, d.Build())
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 
 	fakeDir := "/foobarbaz"
 	mapping := map[string]string{fakeDir: fakeDir}
-	must.NoError(t, td.embedDirs(mapping))
+	if err := td.embedDirs(mapping); err != nil {
+		t.Fatalf("embedDirs(%v) should skip %v since it does not exist", mapping, fakeDir)
+	}
 }
 
 // Test that building a chroot copies files from the host into the task dir.
@@ -37,10 +38,12 @@
 
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
 	defer d.Destroy()
 	td := d.NewTaskDir(t1.Name)
-	must.NoError(t, d.Build())
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
 
 	// Create a fake host directory, with a file, and a subfolder that contains
 	// a file.
@@ -48,17 +51,26 @@ func TestTaskDir_EmbedDirs(t *testing.T) {
 	subDirName := "subdir"
 	subDir := filepath.Join(host, subDirName)
-	must.NoError(t, os.MkdirAll(subDir, 0o777))
+	if err := os.MkdirAll(subDir, 0777); err != nil {
+		t.Fatalf("Failed to make subdir %v: %v", subDir, err)
+	}
 
 	file := "foo"
 	subFile := "bar"
-	must.NoError(t, os.WriteFile(filepath.Join(host, file), []byte{'a'}, 0o777))
-	must.NoError(t, os.WriteFile(filepath.Join(subDir, subFile), []byte{'a'}, 0o777))
+	if err := os.WriteFile(filepath.Join(host, file), []byte{'a'}, 0777); err != nil {
+		t.Fatalf("Couldn't create file in host dir %v: %v", host, err)
+	}
+
+	if err := os.WriteFile(filepath.Join(subDir, subFile), []byte{'a'}, 0777); err != nil {
+		t.Fatalf("Couldn't create file in host subdir %v: %v", subDir, err)
+	}
 
 	// Create mapping from host dir to task dir.
 	taskDest := "bin/test/"
 	mapping := map[string]string{host: taskDest}
-	must.NoError(t, td.embedDirs(mapping))
+	if err := td.embedDirs(mapping); err != nil {
+		t.Fatalf("embedDirs(%v) failed: %v", mapping, err)
+	}
 
 	exp := []string{filepath.Join(td.Dir, taskDest, file), filepath.Join(td.Dir, taskDest, subDirName, subFile)}
 	for _, f := range exp {
@@ -70,75 +82,46 @@ func TestTaskDir_EmbedDirs(t *testing.T) {
 
 // Test that task dirs for image based isolation don't require root.
 func TestTaskDir_NonRoot_Image(t *testing.T) {
-	requireNonRoot(t)
-
 	ci.Parallel(t)
-
+	if os.Geteuid() == 0 {
+		t.Skip("test should be run as non-root user")
+	}
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
 	defer d.Destroy()
 	td := d.NewTaskDir(t1.Name)
-	must.NoError(t, d.Build())
-	must.NoError(t, td.Build(fsisolation.Image, nil, "nobody"))
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
+
+	if err := td.Build(false, nil); err != nil {
+		t.Fatalf("TaskDir.Build failed: %v", err)
+	}
 }
 
 // Test that task dirs with no isolation don't require root.
 func TestTaskDir_NonRoot(t *testing.T) {
-	requireNonRoot(t)
-
 	ci.Parallel(t)
+	if os.Geteuid() == 0 {
+		t.Skip("test should be run as non-root user")
+	}
 
 	tmp := t.TempDir()
 
-	d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test")
+	d := NewAllocDir(testlog.HCLogger(t), tmp, "test")
 	defer d.Destroy()
 	td := d.NewTaskDir(t1.Name)
-	must.NoError(t, d.Build())
-	must.NoError(t, td.Build(fsisolation.None, nil, "nobody"))
+	if err := d.Build(); err != nil {
+		t.Fatalf("Build() failed: %v", err)
+	}
+
+	if err := td.Build(false, nil); err != nil {
+		t.Fatalf("TaskDir.Build failed: %v", err)
+	}
 
 	// ${TASK_DIR}/alloc should not exist!
if _, err := os.Stat(td.SharedTaskDir); !os.IsNotExist(err) { t.Fatalf("Expected a NotExist error for shared alloc dir in task dir: %q", td.SharedTaskDir) } } - -func TestTaskDir_NonRoot_Unveil(t *testing.T) { - requireNonRoot(t) - - ci.Parallel(t) - - tmp := t.TempDir() - - // non-root, should still work for tasks running as the same user as the - // nomad client agent - u, err := user.Current() - must.NoError(t, err) - - d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test") - defer d.Destroy() - td := d.NewTaskDir(t1.Name) - must.NoError(t, d.Build()) - must.NoError(t, td.Build(fsisolation.Unveil, nil, u.Username)) - fi, err := os.Stat(td.MountsTaskDir) - must.NoError(t, err) - must.NotNil(t, fi) -} - -func TestTaskDir_Root_Unveil(t *testing.T) { - requireRoot(t) - - ci.Parallel(t) - - tmp := t.TempDir() - - // root, can build task dirs for another user - d := NewAllocDir(testlog.HCLogger(t), tmp, tmp, "test") - defer d.Destroy() - td := d.NewTaskDir(t1.Name) - must.NoError(t, d.Build()) - must.NoError(t, td.Build(fsisolation.Unveil, nil, "nobody")) - fi, err := os.Stat(td.MountsTaskDir) - must.NoError(t, err) - must.NotNil(t, fi) -} diff --git a/client/allocdir/testing.go b/client/allocdir/testing.go index 6efd22a41d9a..382bb631cc4f 100644 --- a/client/allocdir/testing.go +++ b/client/allocdir/testing.go @@ -18,7 +18,7 @@ func TestAllocDir(t testing.T, l hclog.Logger, prefix, id string) (*AllocDir, fu t.Fatalf("Couldn't create temp dir: %v", err) } - allocDir := NewAllocDir(l, dir, dir, id) + allocDir := NewAllocDir(l, dir, id) cleanup := func() { if err := os.RemoveAll(dir); err != nil { diff --git a/client/allocrunner/alloc_runner.go b/client/allocrunner/alloc_runner.go index c88b90bc036e..db0e5dee4817 100644 --- a/client/allocrunner/alloc_runner.go +++ b/client/allocrunner/alloc_runner.go @@ -34,7 +34,6 @@ import ( "github.com/hashicorp/nomad/client/vaultclient" "github.com/hashicorp/nomad/client/widmgr" "github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/nomad/helper/users/dynamic" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/device" "github.com/hashicorp/nomad/plugins/drivers" @@ -133,7 +132,7 @@ type allocRunner struct { stateDB cstate.StateDB // allocDir is used to build the allocations directory structure. - allocDir allocdir.Interface + allocDir *allocdir.AllocDir // runnerHooks are alloc runner lifecycle hooks that should be run on state // transitions. @@ -212,9 +211,6 @@ type allocRunner struct { // widmgr manages workload identity signatures widmgr widmgr.IdentityManager - - // users manages a pool of dynamic workload users - users dynamic.Pool } // NewAllocRunner returns a new allocation runner. 
@@ -259,7 +255,6 @@ func NewAllocRunner(config *config.AllocRunnerConfig) (interfaces.AllocRunner, e partitions: config.Partitions, hookResources: cstructs.NewAllocHookResources(), widsigner: config.WIDSigner, - users: config.Users, } // Create the logger based on the allocation ID @@ -269,15 +264,7 @@ func NewAllocRunner(config *config.AllocRunnerConfig) (interfaces.AllocRunner, e ar.allocBroadcaster = cstructs.NewAllocBroadcaster(ar.logger) // Create alloc dir - // - // TODO(shoenig): need to decide what version of alloc dir to use, and the - // return value should probably now be an interface - ar.allocDir = allocdir.NewAllocDir( - ar.logger, - config.ClientConfig.AllocDir, - config.ClientConfig.AllocMountsDir, - alloc.ID, - ) + ar.allocDir = allocdir.NewAllocDir(ar.logger, config.ClientConfig.AllocDir, alloc.ID) ar.taskCoordinator = tasklifecycle.NewCoordinator(ar.logger, tg.Tasks, ar.waitCh) @@ -330,7 +317,6 @@ func (ar *allocRunner) initTaskRunners(tasks []*structs.Task) error { Wranglers: ar.wranglers, AllocHookResources: ar.hookResources, WIDMgr: ar.widmgr, - Users: ar.users, } // Create, but do not Run, the task runner @@ -446,7 +432,7 @@ func (ar *allocRunner) setAlloc(updated *structs.Allocation) { } // GetAllocDir returns the alloc dir which is safe for concurrent use. -func (ar *allocRunner) GetAllocDir() allocdir.Interface { +func (ar *allocRunner) GetAllocDir() *allocdir.AllocDir { return ar.allocDir } diff --git a/client/allocrunner/alloc_runner_hooks.go b/client/allocrunner/alloc_runner_hooks.go index d95e492941a2..f36001cb2649 100644 --- a/client/allocrunner/alloc_runner_hooks.go +++ b/client/allocrunner/alloc_runner_hooks.go @@ -106,12 +106,8 @@ func (ar *allocRunner) initRunnerHooks(config *clientconfig.Config) error { // Create a new taskenv.Builder which is used by hooks that mutate them to // build new taskenv.TaskEnv. newEnvBuilder := func() *taskenv.Builder { - return taskenv.NewBuilder( - config.Node, - ar.Alloc(), - nil, - config.Region, - ).SetAllocDir(ar.allocDir.AllocDirPath()) + return taskenv.NewBuilder(config.Node, ar.Alloc(), nil, config.Region). + SetAllocDir(ar.allocDir.AllocDir) } // Create a taskenv.TaskEnv which is used for read only purposes by the diff --git a/client/allocrunner/alloc_runner_test.go b/client/allocrunner/alloc_runner_test.go index 71cad1c8e6bb..b52e0e567f48 100644 --- a/client/allocrunner/alloc_runner_test.go +++ b/client/allocrunner/alloc_runner_test.go @@ -1641,8 +1641,8 @@ func TestAllocRunner_Destroy(t *testing.T) { require.Nil(t, ts) // Assert the alloc directory was cleaned - if _, err := os.Stat(ar.(*allocRunner).allocDir.AllocDirPath()); err == nil { - require.Fail(t, "alloc dir still exists: %v", ar.(*allocRunner).allocDir.AllocDirPath()) + if _, err := os.Stat(ar.(*allocRunner).allocDir.AllocDir); err == nil { + require.Fail(t, "alloc dir still exists: %v", ar.(*allocRunner).allocDir.AllocDir) } else if !os.IsNotExist(err) { require.Failf(t, "expected NotExist error", "found %v", err) } @@ -1700,9 +1700,9 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { // Step 2. 
Modify its directory task := alloc.Job.TaskGroups[0].Tasks[0] - dataFile := filepath.Join(ar.GetAllocDir().ShareDirPath(), "data", "data_file") + dataFile := filepath.Join(ar.GetAllocDir().SharedDir, "data", "data_file") os.WriteFile(dataFile, []byte("hello world"), os.ModePerm) - taskDir := ar.GetAllocDir().GetTaskDir(task.Name) + taskDir := ar.GetAllocDir().TaskDirs[task.Name] taskLocalFile := filepath.Join(taskDir.LocalDir, "local_file") os.WriteFile(taskLocalFile, []byte("good bye world"), os.ModePerm) @@ -1727,11 +1727,11 @@ func TestAllocRunner_MoveAllocDir(t *testing.T) { WaitForClientState(t, ar, structs.AllocClientStatusComplete) // Ensure that data from ar was moved to ar2 - dataFile = filepath.Join(ar2.GetAllocDir().ShareDirPath(), "data", "data_file") + dataFile = filepath.Join(ar2.GetAllocDir().SharedDir, "data", "data_file") fileInfo, _ := os.Stat(dataFile) require.NotNilf(t, fileInfo, "file %q not found", dataFile) - taskDir = ar2.GetAllocDir().GetTaskDir(task.Name) + taskDir = ar2.GetAllocDir().TaskDirs[task.Name] taskLocalFile = filepath.Join(taskDir.LocalDir, "local_file") fileInfo, _ = os.Stat(taskLocalFile) require.NotNilf(t, fileInfo, "file %q not found", dataFile) @@ -1987,8 +1987,8 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) { } // Check the alloc directory still exists - if _, err := os.Stat(ar.GetAllocDir().AllocDirPath()); err != nil { - return false, fmt.Errorf("alloc dir destroyed: %v", ar.GetAllocDir().AllocDirPath()) + if _, err := os.Stat(ar.GetAllocDir().AllocDir); err != nil { + return false, fmt.Errorf("alloc dir destroyed: %v", ar.GetAllocDir().AllocDir) } return true, nil @@ -2011,8 +2011,8 @@ func TestAllocRunner_TerminalUpdate_Destroy(t *testing.T) { } // Check the alloc directory was cleaned - if _, err := os.Stat(ar.GetAllocDir().AllocDirPath()); err == nil { - return false, fmt.Errorf("alloc dir still exists: %v", ar.GetAllocDir().AllocDirPath()) + if _, err := os.Stat(ar.GetAllocDir().AllocDir); err == nil { + return false, fmt.Errorf("alloc dir still exists: %v", ar.GetAllocDir().AllocDir) } else if !os.IsNotExist(err) { return false, fmt.Errorf("stat err: %v", err) } diff --git a/client/allocrunner/allocdir_hook.go b/client/allocrunner/allocdir_hook.go index f575be44a1cb..c132c3ce4984 100644 --- a/client/allocrunner/allocdir_hook.go +++ b/client/allocrunner/allocdir_hook.go @@ -4,18 +4,18 @@ package allocrunner import ( - "github.com/hashicorp/go-hclog" + log "github.com/hashicorp/go-hclog" "github.com/hashicorp/nomad/client/allocdir" ) // allocDirHook creates and destroys the root directory and shared directories // for an allocation. 
type allocDirHook struct { - allocDir allocdir.Interface - logger hclog.Logger + allocDir *allocdir.AllocDir + logger log.Logger } -func newAllocDirHook(logger hclog.Logger, allocDir allocdir.Interface) *allocDirHook { +func newAllocDirHook(logger log.Logger, allocDir *allocdir.AllocDir) *allocDirHook { ad := &allocDirHook{ allocDir: allocDir, } diff --git a/client/allocrunner/consul_grpc_sock_hook.go b/client/allocrunner/consul_grpc_sock_hook.go index 492f06d09925..2ee814bf6031 100644 --- a/client/allocrunner/consul_grpc_sock_hook.go +++ b/client/allocrunner/consul_grpc_sock_hook.go @@ -56,12 +56,8 @@ type consulGRPCSocketHook struct { } func newConsulGRPCSocketHook( - logger hclog.Logger, - alloc *structs.Allocation, - allocDir allocdir.Interface, - configs map[string]*config.ConsulConfig, - nodeAttrs map[string]string, -) *consulGRPCSocketHook { + logger hclog.Logger, alloc *structs.Allocation, allocDir *allocdir.AllocDir, + configs map[string]*config.ConsulConfig, nodeAttrs map[string]string) *consulGRPCSocketHook { // Get the deduplicated set of Consul clusters that are needed by this // alloc. For Nomad CE, this will always be just the default cluster. @@ -85,12 +81,8 @@ func newConsulGRPCSocketHook( consulGRPCPort = consulGRPCFallbackPort } - proxies[clusterName] = newGRPCSocketProxy( - logger, - allocDir, - configs[clusterName], - consulGRPCPort, - ) + proxies[clusterName] = newGRPCSocketProxy(logger, allocDir, + configs[clusterName], consulGRPCPort) return true }) @@ -182,7 +174,7 @@ func (h *consulGRPCSocketHook) Postrun() error { type grpcSocketProxy struct { logger hclog.Logger - allocDir allocdir.Interface + allocDir *allocdir.AllocDir config *config.ConsulConfig // consulGRPCFallbackPort is the port to use if the operator did not @@ -196,11 +188,8 @@ type grpcSocketProxy struct { } func newGRPCSocketProxy( - logger hclog.Logger, - allocDir allocdir.Interface, - config *config.ConsulConfig, - consulGRPCFallbackPort string, -) *grpcSocketProxy { + logger hclog.Logger, allocDir *allocdir.AllocDir, config *config.ConsulConfig, + consulGRPCFallbackPort string) *grpcSocketProxy { ctx, cancel := context.WithCancel(context.Background()) return &grpcSocketProxy{ @@ -257,7 +246,7 @@ func (p *grpcSocketProxy) run(alloc *structs.Allocation) error { socketFile = filepath.Join(allocdir.SharedAllocName, allocdir.TmpDirName, "consul_"+p.config.Name+"_grpc.sock") } - hostGRPCSocketPath := filepath.Join(p.allocDir.AllocDirPath(), socketFile) + hostGRPCSocketPath := filepath.Join(p.allocDir.AllocDir, socketFile) // if the socket already exists we'll try to remove it, but if not then any // other errors will bubble up to the caller here or when we try to listen diff --git a/client/allocrunner/consul_hook.go b/client/allocrunner/consul_hook.go index 06514dde5087..22b040864630 100644 --- a/client/allocrunner/consul_hook.go +++ b/client/allocrunner/consul_hook.go @@ -19,7 +19,7 @@ import ( type consulHook struct { alloc *structs.Allocation - allocdir allocdir.Interface + allocdir *allocdir.AllocDir widmgr widmgr.IdentityManager consulConfigs map[string]*structsc.ConsulConfig consulClientConstructor func(*structsc.ConsulConfig, log.Logger) (consul.Client, error) @@ -30,7 +30,7 @@ type consulHook struct { type consulHookConfig struct { alloc *structs.Allocation - allocdir allocdir.Interface + allocdir *allocdir.AllocDir widmgr widmgr.IdentityManager // consulConfigs is a map of cluster names to Consul configs diff --git a/client/allocrunner/consul_http_sock_hook.go 
b/client/allocrunner/consul_http_sock_hook.go index f4990974a6e9..eaf5359843d0 100644 --- a/client/allocrunner/consul_http_sock_hook.go +++ b/client/allocrunner/consul_http_sock_hook.go @@ -43,12 +43,7 @@ type consulHTTPSockHook struct { proxies map[string]*httpSocketProxy } -func newConsulHTTPSocketHook( - logger hclog.Logger, - alloc *structs.Allocation, - allocDir allocdir.Interface, - configs map[string]*config.ConsulConfig, -) *consulHTTPSockHook { +func newConsulHTTPSocketHook(logger hclog.Logger, alloc *structs.Allocation, allocDir *allocdir.AllocDir, configs map[string]*config.ConsulConfig) *consulHTTPSockHook { // Get the deduplicated set of Consul clusters that are needed by this // alloc. For Nomad CE, this will always be just the default cluster. @@ -60,11 +55,7 @@ func newConsulHTTPSocketHook( proxies := map[string]*httpSocketProxy{} clusterNames.ForEach(func(clusterName string) bool { - proxies[clusterName] = newHTTPSocketProxy( - logger, - allocDir, - configs[clusterName], - ) + proxies[clusterName] = newHTTPSocketProxy(logger, allocDir, configs[clusterName]) return true }) @@ -155,7 +146,7 @@ func (h *consulHTTPSockHook) Postrun() error { type httpSocketProxy struct { logger hclog.Logger - allocDir allocdir.Interface + allocDir *allocdir.AllocDir config *config.ConsulConfig ctx context.Context @@ -164,11 +155,7 @@ type httpSocketProxy struct { runOnce bool } -func newHTTPSocketProxy( - logger hclog.Logger, - allocDir allocdir.Interface, - config *config.ConsulConfig, -) *httpSocketProxy { +func newHTTPSocketProxy(logger hclog.Logger, allocDir *allocdir.AllocDir, config *config.ConsulConfig) *httpSocketProxy { ctx, cancel := context.WithCancel(context.Background()) return &httpSocketProxy{ logger: logger, @@ -211,7 +198,7 @@ func (p *httpSocketProxy) run(alloc *structs.Allocation) error { socketFile = filepath.Join(allocdir.SharedAllocName, allocdir.TmpDirName, "consul_"+p.config.Name+"_http.sock") } - hostHTTPSockPath := filepath.Join(p.allocDir.AllocDirPath(), socketFile) + hostHTTPSockPath := filepath.Join(p.allocDir.AllocDir, socketFile) if err := maybeRemoveOldSocket(hostHTTPSockPath); err != nil { return err } diff --git a/client/allocrunner/interfaces/runner.go b/client/allocrunner/interfaces/runner.go index ed13edea73a3..7e94f0306091 100644 --- a/client/allocrunner/interfaces/runner.go +++ b/client/allocrunner/interfaces/runner.go @@ -48,7 +48,7 @@ type AllocRunner interface { GetTaskDriverCapabilities(taskName string) (*drivers.Capabilities, error) StatsReporter() AllocStatsReporter Listener() *cstructs.AllocListener - GetAllocDir() allocdir.Interface + GetAllocDir() *allocdir.AllocDir } // TaskStateHandler exposes a handler to be called when a task's state changes diff --git a/client/allocrunner/migrate_hook.go b/client/allocrunner/migrate_hook.go index 872ec8679934..08ed839c727f 100644 --- a/client/allocrunner/migrate_hook.go +++ b/client/allocrunner/migrate_hook.go @@ -15,16 +15,12 @@ import ( // diskMigrationHook migrates ephemeral disk volumes. Depends on alloc dir // being built but must be run before anything else manipulates the alloc dir. 
type diskMigrationHook struct { - allocDir allocdir.Interface + allocDir *allocdir.AllocDir allocWatcher config.PrevAllocMigrator logger log.Logger } -func newDiskMigrationHook( - logger log.Logger, - allocWatcher config.PrevAllocMigrator, - allocDir allocdir.Interface, -) *diskMigrationHook { +func newDiskMigrationHook(logger log.Logger, allocWatcher config.PrevAllocMigrator, allocDir *allocdir.AllocDir) *diskMigrationHook { h := &diskMigrationHook{ allocDir: allocDir, allocWatcher: allocWatcher, diff --git a/client/allocrunner/taskrunner/connect_native_hook_test.go b/client/allocrunner/taskrunner/connect_native_hook_test.go index 693c859ae7ea..a3e7615b7b01 100644 --- a/client/allocrunner/taskrunner/connect_native_hook_test.go +++ b/client/allocrunner/taskrunner/connect_native_hook_test.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/stretchr/testify/require" ) @@ -281,7 +280,7 @@ func TestTaskRunner_ConnectNativeHook_Noop(t *testing.T) { Task: task, TaskDir: allocDir.NewTaskDir(task.Name), } - require.NoError(t, request.TaskDir.Build(fsisolation.None, nil, task.User)) + require.NoError(t, request.TaskDir.Build(false, nil)) response := new(interfaces.TaskPrestartResponse) @@ -343,7 +342,7 @@ func TestTaskRunner_ConnectNativeHook_Ok(t *testing.T) { TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, request.TaskDir.Build(fsisolation.None, nil, tg.Tasks[0].User)) + require.NoError(t, request.TaskDir.Build(false, nil)) response := new(interfaces.TaskPrestartResponse) @@ -405,7 +404,7 @@ func TestTaskRunner_ConnectNativeHook_with_SI_token(t *testing.T) { TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, request.TaskDir.Build(fsisolation.None, nil, tg.Tasks[0].User)) + require.NoError(t, request.TaskDir.Build(false, nil)) // Insert service identity token in the secrets directory token := uuid.Generate() @@ -488,7 +487,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS(t *testing.T) { TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name), TaskEnv: taskenv.NewEmptyTaskEnv(), // nothing set in env block } - require.NoError(t, request.TaskDir.Build(fsisolation.None, nil, tg.Tasks[0].User)) + require.NoError(t, request.TaskDir.Build(false, nil)) response := new(interfaces.TaskPrestartResponse) response.Env = make(map[string]string) @@ -615,7 +614,7 @@ func TestTaskRunner_ConnectNativeHook_shareTLS_override(t *testing.T) { TaskDir: allocDir.NewTaskDir(tg.Tasks[0].Name), TaskEnv: taskEnv, // env block is configured w/ non-default tls configs } - require.NoError(t, request.TaskDir.Build(fsisolation.None, nil, tg.Tasks[0].User)) + require.NoError(t, request.TaskDir.Build(false, nil)) response := new(interfaces.TaskPrestartResponse) response.Env = make(map[string]string) diff --git a/client/allocrunner/taskrunner/dispatch_hook_test.go b/client/allocrunner/taskrunner/dispatch_hook_test.go index efbe4e056602..1b55c27cb6f7 100644 --- a/client/allocrunner/taskrunner/dispatch_hook_test.go +++ b/client/allocrunner/taskrunner/dispatch_hook_test.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/stretchr/testify/require" ) @@ -36,10 +35,10 @@ func 
TestTaskRunner_DispatchHook_NoPayload(t *testing.T) { alloc := mock.BatchAlloc() task := alloc.Job.TaskGroups[0].Tasks[0] - allocDir := allocdir.NewAllocDir(logger, "nomadtest_nopayload", "nomadtest_nopayload", alloc.ID) + allocDir := allocdir.NewAllocDir(logger, "nomadtest_nopayload", alloc.ID) defer allocDir.Destroy() taskDir := allocDir.NewTaskDir(task.Name) - require.NoError(taskDir.Build(fsisolation.None, nil, task.User)) + require.NoError(taskDir.Build(false, nil)) h := newDispatchHook(alloc, logger) @@ -82,10 +81,10 @@ func TestTaskRunner_DispatchHook_Ok(t *testing.T) { File: "out", } - allocDir := allocdir.NewAllocDir(logger, "nomadtest_dispatchok", "nomadtest_dispatchok", alloc.ID) + allocDir := allocdir.NewAllocDir(logger, "nomadtest_dispatchok", alloc.ID) defer allocDir.Destroy() taskDir := allocDir.NewTaskDir(task.Name) - require.NoError(taskDir.Build(fsisolation.None, nil, task.User)) + require.NoError(taskDir.Build(false, nil)) h := newDispatchHook(alloc, logger) @@ -127,10 +126,10 @@ func TestTaskRunner_DispatchHook_Error(t *testing.T) { File: "out", } - allocDir := allocdir.NewAllocDir(logger, "nomadtest_dispatcherr", "nomadtest_dispatcherr", alloc.ID) + allocDir := allocdir.NewAllocDir(logger, "nomadtest_dispatcherr", alloc.ID) defer allocDir.Destroy() taskDir := allocDir.NewTaskDir(task.Name) - require.NoError(taskDir.Build(fsisolation.None, nil, task.User)) + require.NoError(taskDir.Build(false, nil)) h := newDispatchHook(alloc, logger) diff --git a/client/allocrunner/taskrunner/dynamic_users_hook.go b/client/allocrunner/taskrunner/dynamic_users_hook.go deleted file mode 100644 index ff67d0c139f9..000000000000 --- a/client/allocrunner/taskrunner/dynamic_users_hook.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package taskrunner - -import ( - "context" - "fmt" - "sync" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/client/allocrunner/interfaces" - "github.com/hashicorp/nomad/helper/users/dynamic" -) - -const ( - dynamicUsersHookName = "workload_users" - dynamicUsersStateKey = "dynamic_user_ugid" -) - -// dynamicUsersHook is used for allocating a one-time use UID/GID on behalf of -// a single workload (task). No other task will be assigned the same UID/GID -// while this task is running. -type dynamicUsersHook struct { - shutdownCtx context.Context - logger hclog.Logger - usable bool - - lock *sync.Mutex - pool dynamic.Pool -} - -func newDynamicUsersHook(ctx context.Context, usable bool, logger hclog.Logger, pool dynamic.Pool) *dynamicUsersHook { - return &dynamicUsersHook{ - shutdownCtx: ctx, - logger: logger.Named(dynamicUsersHookName), - lock: new(sync.Mutex), - pool: pool, - usable: usable, - } -} - -func (*dynamicUsersHook) Name() string { - return dynamicUsersHookName -} - -// Prestart runs on both initial start and on restart. 
-func (h *dynamicUsersHook) Prestart(_ context.Context, request *interfaces.TaskPrestartRequest, response *interfaces.TaskPrestartResponse) error { - // if the task driver does not support the DynamicWorkloadUsers capability, - // do nothing - if !h.usable { - return nil - } - - // if the task has a user set, do nothing - // - // it's up to the job-submitter to set a user that exists on the system - if request.Task.User != "" { - return nil - } - - // if this is the restart case, the UGID will already be acquired and we - // just need to read it back out of the hook's state - if request.PreviousState != nil { - ugid, exists := request.PreviousState[dynamicUsersStateKey] - if exists { - response.State[dynamicUsersStateKey] = ugid - return nil - } - } - - // otherwise we will acquire a dynamic UGID from the pool. - h.lock.Lock() - defer h.lock.Unlock() - - // allocate an unused UID/GID from the pool - ugid, err := h.pool.Acquire() - if err != nil { - h.logger.Error("unable to acquire anonymous UID/GID: %v", err) - return err - } - - h.logger.Trace("acquired dynamic workload user", "ugid", ugid) - - // set the special user of the task - request.Task.User = dynamic.String(ugid) - - // set the user on the hook so we may release it later - response.State = make(map[string]string, 1) - response.State[dynamicUsersStateKey] = request.Task.User - - return nil -} - -func (h *dynamicUsersHook) Stop(_ context.Context, request *interfaces.TaskStopRequest, response *interfaces.TaskStopResponse) error { - // if the task driver does not support the DWU capability, nothing to do - if !h.usable { - return nil - } - - // if we did not store a user for this task; nothing to release - user, exists := request.ExistingState[dynamicUsersStateKey] - if !exists { - return nil - } - - // otherwise we need to release the UGID back to the pool - h.lock.Lock() - defer h.lock.Unlock() - - // parse the UID/GID from the pseudo username - ugid, err := dynamic.Parse(user) - if err != nil { - return fmt.Errorf("unable to release dynamic workload user: %w", err) - } - - // release the UID/GID to the pool - if err = h.pool.Release(ugid); err != nil { - return fmt.Errorf("unable to release dynamic workload user: %w", err) - } - - h.logger.Trace("released dynamic workload user", "ugid", ugid) - return nil -} diff --git a/client/allocrunner/taskrunner/dynamic_users_hook_test.go b/client/allocrunner/taskrunner/dynamic_users_hook_test.go deleted file mode 100644 index 5ebfa621fd07..000000000000 --- a/client/allocrunner/taskrunner/dynamic_users_hook_test.go +++ /dev/null @@ -1,203 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package taskrunner - -import ( - "context" - "testing" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/client/allocrunner/interfaces" - "github.com/hashicorp/nomad/helper/testlog" - "github.com/hashicorp/nomad/helper/users/dynamic" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestTaskRunner_DynamicUsersHook_Prestart_unusable(t *testing.T) { - ci.Parallel(t) - - // task driver does not indicate DynamicWorkloadUsers capability - const capable = false - ctx := context.Background() - logger := testlog.HCLogger(t) - - // if the driver does not indicate the DynamicWorkloadUsers capability, - // none of the pool, request, or response are touched - so using nil - // for each of them shows we are exiting the hook immediatly - var pool dynamic.Pool = nil - var request *interfaces.TaskPrestartRequest = nil - var response *interfaces.TaskPrestartResponse = nil - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.False(t, h.usable) - must.NoError(t, h.Prestart(ctx, request, response)) -} - -func TestTaskRunner_DynamicUsersHook_Prestart_unnecessary(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // if the task configures a user, no dynamic workload user will be allocated - // and we prove this by setting a nil pool - var pool dynamic.Pool = nil - var response = new(interfaces.TaskPrestartResponse) - var request = &interfaces.TaskPrestartRequest{ - Task: &structs.Task{User: "billy"}, - } - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.NoError(t, h.Prestart(ctx, request, response)) - must.MapEmpty(t, response.State) // no user set - must.Eq(t, "billy", request.Task.User) // not modified -} - -func TestTaskRunner_DynamicUsersHook_Prestart_used(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // create a pool allowing UIDs in range [100, 199] - var pool dynamic.Pool = dynamic.New(&dynamic.PoolConfig{ - MinUGID: 100, - MaxUGID: 199, - }) - var response = new(interfaces.TaskPrestartResponse) - var request = &interfaces.TaskPrestartRequest{ - Task: &structs.Task{User: ""}, // user is not set - } - - // once the hook runs, check we got an expected ugid and the - // task user is set to our pseudo dynamic username - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.NoError(t, h.Prestart(ctx, request, response)) - username, exists := response.State[dynamicUsersStateKey] - must.True(t, exists) - ugid, err := dynamic.Parse(username) - must.NoError(t, err) - must.Between(t, 100, ugid, 199) - must.Eq(t, username, request.Task.User) - must.StrHasPrefix(t, "nomad-", username) -} - -func TestTaskRunner_DynamicUsersHook_Prestart_exhausted(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // create a pool allowing UIDs in range [100, 199] - var pool dynamic.Pool = dynamic.New(&dynamic.PoolConfig{ - MinUGID: 100, - MaxUGID: 101, - }) - pool.Restore(100) - pool.Restore(101) - var response = new(interfaces.TaskPrestartResponse) - var request = &interfaces.TaskPrestartRequest{ - Task: &structs.Task{User: ""}, // user is not set - } - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.ErrorContains(t, h.Prestart(ctx, request, response), "uid/gid pool exhausted") -} - -func 
TestTaskRunner_DynamicUsersHook_Stop_unusable(t *testing.T) { - ci.Parallel(t) - - const capable = false - ctx := context.Background() - logger := testlog.HCLogger(t) - - // prove we use none of these by setting them all to nil - var pool dynamic.Pool = nil - var request *interfaces.TaskStopRequest = nil - var response *interfaces.TaskStopResponse = nil - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.False(t, h.usable) - must.NoError(t, h.Stop(ctx, request, response)) -} - -func TestTaskRunner_DynamicUsersHook_Stop_release(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // prove we use none of these by setting them all to nil - var pool dynamic.Pool = dynamic.New(&dynamic.PoolConfig{ - MinUGID: 100, - MaxUGID: 199, - }) - pool.Restore(150) // allocate ugid 150 - var request = &interfaces.TaskStopRequest{ - ExistingState: map[string]string{ - dynamicUsersStateKey: "nomad-150", - }, - } - var response = new(interfaces.TaskStopResponse) - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.NoError(t, h.Stop(ctx, request, response)) -} - -func TestTaskRunner_DynamicUsersHook_Stop_malformed(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // prove we use none of these by setting them all to nil - var pool dynamic.Pool = dynamic.New(&dynamic.PoolConfig{ - MinUGID: 100, - MaxUGID: 199, - }) - var request = &interfaces.TaskStopRequest{ - ExistingState: map[string]string{ - dynamicUsersStateKey: "not-valid", - }, - } - var response = new(interfaces.TaskStopResponse) - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.ErrorContains(t, h.Stop(ctx, request, response), "unable to parse uid/gid from username") -} - -func TestTaskRunner_DynamicUsersHook_Stop_not_in_use(t *testing.T) { - ci.Parallel(t) - - const capable = true - ctx := context.Background() - logger := testlog.HCLogger(t) - - // prove we use none of these by setting them all to nil - var pool dynamic.Pool = dynamic.New(&dynamic.PoolConfig{ - MinUGID: 100, - MaxUGID: 199, - }) - var request = &interfaces.TaskStopRequest{ - ExistingState: map[string]string{ - dynamicUsersStateKey: "nomad-101", - }, - } - var response = new(interfaces.TaskStopResponse) - - h := newDynamicUsersHook(ctx, capable, logger, pool) - must.True(t, h.usable) - must.ErrorContains(t, h.Stop(ctx, request, response), "release of unused uid/gid") -} diff --git a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go index 0e6b32d29150..f6caa50821e7 100644 --- a/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_bootstrap_hook_test.go @@ -32,7 +32,6 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/stretchr/testify/require" "golang.org/x/sys/unix" ) @@ -356,7 +355,7 @@ func TestEnvoyBootstrapHook_with_SI_token(t *testing.T) { TaskDir: allocDir.NewTaskDir(sidecarTask.Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, sidecarTask.User)) + require.NoError(t, req.TaskDir.Build(false, nil)) // Insert service identity token in the secrets directory token := uuid.Generate() @@ -454,7 +453,7 @@ func 
TestTaskRunner_EnvoyBootstrapHook_sidecar_ok(t *testing.T) { TaskDir: allocDir.NewTaskDir(sidecarTask.Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, sidecarTask.User)) + require.NoError(t, req.TaskDir.Build(false, nil)) resp := &interfaces.TaskPrestartResponse{} @@ -534,7 +533,7 @@ func TestTaskRunner_EnvoyBootstrapHook_gateway_ok(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + require.NoError(t, req.TaskDir.Build(false, nil)) var resp interfaces.TaskPrestartResponse @@ -583,7 +582,7 @@ func TestTaskRunner_EnvoyBootstrapHook_Noop(t *testing.T) { Task: task, TaskDir: allocDir.NewTaskDir(task.Name), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, task.User)) + require.NoError(t, req.TaskDir.Build(false, nil)) resp := &interfaces.TaskPrestartResponse{} @@ -659,7 +658,7 @@ func TestTaskRunner_EnvoyBootstrapHook_RecoverableError(t *testing.T) { TaskDir: allocDir.NewTaskDir(sidecarTask.Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, sidecarTask.User)) + require.NoError(t, req.TaskDir.Build(false, nil)) resp := &interfaces.TaskPrestartResponse{} @@ -744,7 +743,7 @@ func TestTaskRunner_EnvoyBootstrapHook_retryTimeout(t *testing.T) { TaskDir: allocDir.NewTaskDir(sidecarTask.Name), TaskEnv: taskenv.NewEmptyTaskEnv(), } - require.NoError(t, req.TaskDir.Build(fsisolation.None, nil, sidecarTask.User)) + require.NoError(t, req.TaskDir.Build(false, nil)) var resp interfaces.TaskPrestartResponse diff --git a/client/allocrunner/taskrunner/envoy_version_hook_test.go b/client/allocrunner/taskrunner/envoy_version_hook_test.go index bd1d1d44a691..26f45dc8189c 100644 --- a/client/allocrunner/taskrunner/envoy_version_hook_test.go +++ b/client/allocrunner/taskrunner/envoy_version_hook_test.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/shoenig/test/must" ) @@ -254,7 +253,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_standard(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse @@ -297,7 +296,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_custom(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse @@ -342,7 +341,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_skip(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse @@ -381,7 +380,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_no_fallback(t *testing.T) { 
TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse @@ -417,7 +416,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_error(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse @@ -456,7 +455,7 @@ func TestTaskRunner_EnvoyVersionHook_Prestart_restart(t *testing.T) { TaskDir: allocDir.NewTaskDir(alloc.Job.TaskGroups[0].Tasks[0].Name), TaskEnv: taskEnvDefault, } - must.NoError(t, request.TaskDir.Build(fsisolation.None, nil, alloc.Job.TaskGroups[0].Tasks[0].User)) + must.NoError(t, request.TaskDir.Build(false, nil)) // Prepare a response var response ifs.TaskPrestartResponse diff --git a/client/allocrunner/taskrunner/getter/params.go b/client/allocrunner/taskrunner/getter/params.go index 51a7b45a267c..1daf30093e55 100644 --- a/client/allocrunner/taskrunner/getter/params.go +++ b/client/allocrunner/taskrunner/getter/params.go @@ -13,6 +13,7 @@ import ( "strings" "time" + "github.com/hashicorp/go-cleanhttp" "github.com/hashicorp/go-getter" ) @@ -35,7 +36,6 @@ type parameters struct { // Artifact Mode getter.ClientMode `json:"artifact_mode"` - Insecure bool `json:"artifact_insecure"` Source string `json:"artifact_source"` Destination string `json:"artifact_destination"` Headers map[string][]string `json:"artifact_headers"` @@ -102,8 +102,6 @@ func (p *parameters) Equal(o *parameters) bool { return false case p.Mode != o.Mode: return false - case p.Insecure != o.Insecure: - return false case p.Source != o.Source: return false case p.Destination != o.Destination: @@ -132,6 +130,7 @@ const ( func (p *parameters) client(ctx context.Context) *getter.Client { httpGetter := &getter.HttpGetter{ Netrc: true, + Client: cleanhttp.DefaultClient(), Header: p.Headers, // Do not support the custom X-Terraform-Get header and @@ -163,8 +162,8 @@ func (p *parameters) client(ctx context.Context) *getter.Client { Src: p.Source, Dst: p.Destination, Mode: p.Mode, - Insecure: p.Insecure, Umask: umask, + Insecure: false, DisableSymlinks: true, Decompressors: decompressors, Getters: map[string]getter.Getter{ diff --git a/client/allocrunner/taskrunner/getter/params_test.go b/client/allocrunner/taskrunner/getter/params_test.go index c262bb3a86e1..386b8b1ff76c 100644 --- a/client/allocrunner/taskrunner/getter/params_test.go +++ b/client/allocrunner/taskrunner/getter/params_test.go @@ -27,7 +27,6 @@ const paramsAsJSON = ` "disable_filesystem_isolation": true, "set_environment_variables": "", "artifact_mode": 2, - "artifact_insecure": false, "artifact_source": "https://example.com/file.txt", "artifact_destination": "local/out.txt", "artifact_headers": { diff --git a/client/allocrunner/taskrunner/getter/sandbox.go b/client/allocrunner/taskrunner/getter/sandbox.go index fdaca51b56c1..c114a11bf92b 100644 --- a/client/allocrunner/taskrunner/getter/sandbox.go +++ b/client/allocrunner/taskrunner/getter/sandbox.go @@ -38,7 +38,6 @@ func (s *Sandbox) Get(env interfaces.EnvReplacer, artifact *structs.TaskArtifact } mode := getMode(artifact) - insecure := isInsecure(artifact) headers := getHeaders(env, 
artifact) allocDir, taskDir := getWritableDirs(env) @@ -57,7 +56,6 @@ func (s *Sandbox) Get(env interfaces.EnvReplacer, artifact *structs.TaskArtifact // artifact configuration Mode: mode, - Insecure: insecure, Source: source, Destination: destination, Headers: headers, diff --git a/client/allocrunner/taskrunner/getter/sandbox_test.go b/client/allocrunner/taskrunner/getter/sandbox_test.go index 7906c7668fa5..6028d211cd68 100644 --- a/client/allocrunner/taskrunner/getter/sandbox_test.go +++ b/client/allocrunner/taskrunner/getter/sandbox_test.go @@ -4,8 +4,6 @@ package getter import ( - "net/http" - "net/http/httptest" "os" "path/filepath" "testing" @@ -53,32 +51,3 @@ func TestSandbox_Get_http(t *testing.T) { must.NoError(t, err) must.StrContains(t, string(b), "module github.com/hashicorp/go-set") } - -func TestSandbox_Get_insecure_http(t *testing.T) { - testutil.RequireRoot(t) - logger := testlog.HCLogger(t) - - ac := artifactConfig(10 * time.Second) - sbox := New(ac, logger) - - _, taskDir := SetupDir(t) - env := noopTaskEnv(taskDir) - - srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusOK) - })) - defer srv.Close() - - artifact := &structs.TaskArtifact{ - GetterSource: srv.URL, - RelativeDest: "local/downloads", - } - - err := sbox.Get(env, artifact) - must.Error(t, err) - must.StrContains(t, err.Error(), "x509: certificate signed by unknown authority") - - artifact.GetterInsecure = true - err = sbox.Get(env, artifact) - must.NoError(t, err) -} diff --git a/client/allocrunner/taskrunner/getter/util.go b/client/allocrunner/taskrunner/getter/util.go index afe961fe83c0..04bdb3b38f5f 100644 --- a/client/allocrunner/taskrunner/getter/util.go +++ b/client/allocrunner/taskrunner/getter/util.go @@ -84,10 +84,6 @@ func getMode(artifact *structs.TaskArtifact) getter.ClientMode { } } -func isInsecure(artifact *structs.TaskArtifact) bool { - return artifact.GetterInsecure -} - func getHeaders(env interfaces.EnvReplacer, artifact *structs.TaskArtifact) map[string][]string { m := artifact.GetterHeaders if len(m) == 0 { diff --git a/client/allocrunner/taskrunner/task_dir_hook.go b/client/allocrunner/taskrunner/task_dir_hook.go index 6ddc72f96d79..2f193338960e 100644 --- a/client/allocrunner/taskrunner/task_dir_hook.go +++ b/client/allocrunner/taskrunner/task_dir_hook.go @@ -5,7 +5,6 @@ package taskrunner import ( "context" - "path/filepath" "strings" log "github.com/hashicorp/go-hclog" @@ -15,7 +14,7 @@ import ( cconfig "github.com/hashicorp/nomad/client/config" "github.com/hashicorp/nomad/client/taskenv" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" + "github.com/hashicorp/nomad/plugins/drivers" ) const ( @@ -66,7 +65,7 @@ func (h *taskDirHook) Prestart(ctx context.Context, req *interfaces.TaskPrestart h.runner.EmitEvent(structs.NewTaskEvent(structs.TaskSetup).SetMessage(structs.TaskBuildingTaskDir)) // Build the task directory structure - err := h.runner.taskDir.Build(fsi, chroot, req.Task.User) + err := h.runner.taskDir.Build(fsi == drivers.FSIsolationChroot, chroot) if err != nil { return err } @@ -80,7 +79,7 @@ func (h *taskDirHook) Prestart(ctx context.Context, req *interfaces.TaskPrestart } // setEnvvars sets path and host env vars depending on the FS isolation used. 
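With GetterInsecure support removed in the getter hunks above, every artifact client is built with TLS verification on and an explicit HTTP client. A condensed sketch of the construction in params.go after this change, assuming the go-getter v1 and go-cleanhttp APIs this package already imports; everything except the struct fields shown in the diff is illustrative:

package gettersketch

import (
	"context"

	cleanhttp "github.com/hashicorp/go-cleanhttp"
	getter "github.com/hashicorp/go-getter"
)

// newArtifactClient mirrors parameters.client above: Insecure is pinned to
// false rather than read from the artifact, and the HTTP getter is given
// go-cleanhttp's default client (sane timeouts, no shared global transport)
// instead of a zero-value http.Client.
func newArtifactClient(ctx context.Context, src, dst string, mode getter.ClientMode) *getter.Client {
	httpGetter := &getter.HttpGetter{
		Netrc:  true,
		Client: cleanhttp.DefaultClient(),
	}
	return &getter.Client{
		Ctx:             ctx,
		Src:             src,
		Dst:             dst,
		Mode:            mode,
		Insecure:        false, // GetterInsecure is no longer honored
		DisableSymlinks: true,
		Getters: map[string]getter.Getter{
			"http":  httpGetter,
			"https": httpGetter,
		},
	}
}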
-func setEnvvars(envBuilder *taskenv.Builder, fsi fsisolation.Mode, taskDir *allocdir.TaskDir, conf *cconfig.Config) { +func setEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir, conf *cconfig.Config) { envBuilder.SetClientTaskRoot(taskDir.Dir) envBuilder.SetClientSharedAllocDir(taskDir.SharedAllocDir) @@ -89,12 +88,7 @@ func setEnvvars(envBuilder *taskenv.Builder, fsi fsisolation.Mode, taskDir *allo // Set driver-specific environment variables switch fsi { - case fsisolation.Unveil: - // Use mount paths - envBuilder.SetAllocDir(filepath.Join(taskDir.MountsAllocDir, "alloc")) - envBuilder.SetTaskLocalDir(filepath.Join(taskDir.MountsTaskDir, "local")) - envBuilder.SetSecretsDir(filepath.Join(taskDir.SecretsDir, "secrets")) - case fsisolation.None: + case drivers.FSIsolationNone: // Use host paths envBuilder.SetAllocDir(taskDir.SharedAllocDir) envBuilder.SetTaskLocalDir(taskDir.LocalDir) @@ -107,7 +101,7 @@ func setEnvvars(envBuilder *taskenv.Builder, fsi fsisolation.Mode, taskDir *allo } // Set the host environment variables for non-image based drivers - if fsi != fsisolation.Image { + if fsi != drivers.FSIsolationImage { // COMPAT(1.0) using inclusive language, blacklist is kept for backward compatibility. filter := strings.Split(conf.ReadAlternativeDefault( []string{"env.denylist", "env.blacklist"}, diff --git a/client/allocrunner/taskrunner/task_runner.go b/client/allocrunner/taskrunner/task_runner.go index 53c7b196db58..3f9efb0332e0 100644 --- a/client/allocrunner/taskrunner/task_runner.go +++ b/client/allocrunner/taskrunner/task_runner.go @@ -38,7 +38,6 @@ import ( "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" "github.com/hashicorp/nomad/helper/pluginutils/hclutils" - "github.com/hashicorp/nomad/helper/users/dynamic" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" bstructs "github.com/hashicorp/nomad/plugins/base/structs" @@ -271,9 +270,6 @@ type TaskRunner struct { // widmgr manages workload identities widmgr widmgr.IdentityManager - - // users manages the pool of dynamic workload users - users dynamic.Pool } type Config struct { @@ -349,9 +345,6 @@ type Config struct { // WIDMgr manages workload identities WIDMgr widmgr.IdentityManager - - // Users manages a pool of dynamic workload users - Users dynamic.Pool } func NewTaskRunner(config *Config) (*TaskRunner, error) { @@ -414,7 +407,6 @@ func NewTaskRunner(config *Config) (*TaskRunner, error) { getter: config.Getter, wranglers: config.Wranglers, widmgr: config.WIDMgr, - users: config.Users, } // Create the logger based on the allocation ID @@ -1125,7 +1117,7 @@ func (tr *TaskRunner) persistLocalState() error { func (tr *TaskRunner) buildTaskConfig() *drivers.TaskConfig { task := tr.Task() alloc := tr.Alloc() - invocationid := uuid.Short() + invocationid := uuid.Generate()[:8] taskResources := tr.taskResources ports := tr.Alloc().AllocatedResources.Shared.Ports env := tr.envBuilder.Build() diff --git a/client/allocrunner/taskrunner/task_runner_hooks.go b/client/allocrunner/taskrunner/task_runner_hooks.go index 9dc169e9e598..5282f66978fd 100644 --- a/client/allocrunner/taskrunner/task_runner_hooks.go +++ b/client/allocrunner/taskrunner/task_runner_hooks.go @@ -63,7 +63,6 @@ func (tr *TaskRunner) initHooks() { alloc := tr.Alloc() tr.runnerHooks = []interfaces.TaskHook{ newValidateHook(tr.clientConfig, hookLogger), - newDynamicUsersHook(tr.killCtx, tr.driverCapabilities.DynamicWorkloadUsers, tr.logger, tr.users), 
newTaskDirHook(tr, hookLogger), newIdentityHook(tr, hookLogger), newLogMonHook(tr, hookLogger), diff --git a/client/allocrunner/taskrunner/task_runner_linux_test.go b/client/allocrunner/taskrunner/task_runner_linux_test.go index 7a115a221d2f..3f8148ea818c 100644 --- a/client/allocrunner/taskrunner/task_runner_linux_test.go +++ b/client/allocrunner/taskrunner/task_runner_linux_test.go @@ -15,7 +15,6 @@ import ( "github.com/hashicorp/nomad/client/vaultclient" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/shoenig/test/must" ) @@ -49,7 +48,7 @@ func TestTaskRunner_DisableFileForVaultToken_UpgradePath(t *testing.T) { // Remove private dir and write the Vault token to the secrets dir to // simulate an old task. - err = conf.TaskDir.Build(fsisolation.None, nil, task.User) + err = conf.TaskDir.Build(false, nil) must.NoError(t, err) err = syscall.Unmount(conf.TaskDir.PrivateDir, 0) diff --git a/client/allocrunner/taskrunner/task_runner_test.go b/client/allocrunner/taskrunner/task_runner_test.go index e4b6e678f3cc..876061f6bcba 100644 --- a/client/allocrunner/taskrunner/task_runner_test.go +++ b/client/allocrunner/taskrunner/task_runner_test.go @@ -99,7 +99,7 @@ func testTaskRunnerConfig(t *testing.T, alloc *structs.Allocation, taskName stri } // Create the alloc dir + task dir - allocDir := allocdir.NewAllocDir(logger, clientConf.AllocDir, clientConf.AllocMountsDir, alloc.ID) + allocDir := allocdir.NewAllocDir(logger, clientConf.AllocDir, alloc.ID) if err := allocDir.Build(); err != nil { cleanup() t.Fatalf("error building alloc dir: %v", err) diff --git a/client/allocrunner/taskrunner/template/template_test.go b/client/allocrunner/taskrunner/template/template_test.go index 237209b54925..819f9399d309 100644 --- a/client/allocrunner/taskrunner/template/template_test.go +++ b/client/allocrunner/taskrunner/template/template_test.go @@ -1822,7 +1822,7 @@ func TestTaskTemplateManager_Escapes(t *testing.T) { clienttestutil.RequireNotWindows(t) clientConf := config.DefaultConfig() - must.False(t, clientConf.TemplateConfig.DisableSandbox, must.Sprint("expected sandbox to be enabled")) + must.False(t, clientConf.TemplateConfig.DisableSandbox, must.Sprint("expected sandbox to be disabled")) // Set a fake alloc dir to make test output more realistic clientConf.AllocDir = "/fake/allocdir" @@ -1831,7 +1831,7 @@ func TestTaskTemplateManager_Escapes(t *testing.T) { alloc := mock.Alloc() task := alloc.Job.TaskGroups[0].Tasks[0] logger := testlog.HCLogger(t) - allocDir := allocdir.NewAllocDir(logger, clientConf.AllocDir, clientConf.AllocMountsDir, alloc.ID) + allocDir := allocdir.NewAllocDir(logger, clientConf.AllocDir, alloc.ID) taskDir := allocDir.NewTaskDir(task.Name) containerEnv := func() *taskenv.Builder { diff --git a/client/allocrunner/taskrunner/volume_hook.go b/client/allocrunner/taskrunner/volume_hook.go index 61aeec569c6e..a6da6bdb0642 100644 --- a/client/allocrunner/taskrunner/volume_hook.go +++ b/client/allocrunner/taskrunner/volume_hook.go @@ -95,7 +95,6 @@ func (h *volumeHook) hostVolumeMountConfigurations(taskMounts []*structs.VolumeM TaskPath: m.Destination, Readonly: hostVolume.ReadOnly || req.ReadOnly || m.ReadOnly, PropagationMode: m.PropagationMode, - SELinuxLabel: m.SELinuxLabel, } mounts = append(mounts, mcfg) } @@ -189,7 +188,6 @@ func (h *volumeHook) prepareCSIVolumes(req *interfaces.TaskPrestartRequest, volu TaskPath: m.Destination, Readonly: request.ReadOnly || m.ReadOnly, 
PropagationMode: m.PropagationMode, - SELinuxLabel: m.SELinuxLabel, } mounts = append(mounts, mcfg) } diff --git a/client/allocwatcher/alloc_watcher.go b/client/allocwatcher/alloc_watcher.go index ee0f096aee47..76f1d1c8d949 100644 --- a/client/allocwatcher/alloc_watcher.go +++ b/client/allocwatcher/alloc_watcher.go @@ -45,7 +45,7 @@ type terminated interface { // AllocRunnerMeta provides metadata about an AllocRunner such as its alloc and // alloc dir. type AllocRunnerMeta interface { - GetAllocDir() allocdir.Interface + GetAllocDir() *allocdir.AllocDir Listener() *cstructs.AllocListener Alloc() *structs.Allocation } @@ -193,7 +193,7 @@ type localPrevAlloc struct { sticky bool // prevAllocDir is the alloc dir for the previous alloc - prevAllocDir allocdir.Interface + prevAllocDir *allocdir.AllocDir // prevListener allows blocking for updates to the previous alloc prevListener *cstructs.AllocListener @@ -263,7 +263,7 @@ func (p *localPrevAlloc) Wait(ctx context.Context) error { } // Migrate from previous local alloc dir to destination alloc dir. -func (p *localPrevAlloc) Migrate(ctx context.Context, dest allocdir.Interface) error { +func (p *localPrevAlloc) Migrate(ctx context.Context, dest *allocdir.AllocDir) error { if !p.sticky { // Not a sticky volume, nothing to migrate return nil @@ -426,7 +426,7 @@ func (p *remotePrevAlloc) Wait(ctx context.Context) error { // Migrate alloc data from a remote node if the new alloc has migration enabled // and the old alloc hasn't been GC'd. -func (p *remotePrevAlloc) Migrate(ctx context.Context, dest allocdir.Interface) error { +func (p *remotePrevAlloc) Migrate(ctx context.Context, dest *allocdir.AllocDir) error { if !p.migrate { // Volume wasn't configured to be migrated, return early return nil @@ -514,7 +514,7 @@ func (p *remotePrevAlloc) getNodeAddr(ctx context.Context, nodeID string) (strin // Destroy on the returned allocdir if no error occurs. func (p *remotePrevAlloc) migrateAllocDir(ctx context.Context, nodeAddr string) (*allocdir.AllocDir, error) { // Create the previous alloc dir - prevAllocDir := allocdir.NewAllocDir(p.logger, p.config.AllocDir, p.config.AllocMountsDir, p.prevAllocID) + prevAllocDir := allocdir.NewAllocDir(p.logger, p.config.AllocDir, p.prevAllocID) if err := prevAllocDir.Build(); err != nil { return nil, fmt.Errorf("error building alloc dir for previous alloc %q: %w", p.prevAllocID, err) } @@ -687,7 +687,7 @@ type NoopPrevAlloc struct{} func (NoopPrevAlloc) Wait(context.Context) error { return nil } // Migrate returns nil immediately. 
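The alloc_watcher hunks here, together with the arconfig change later in this patch, revert the migrator contract from the allocdir.Interface abstraction back to the concrete type. Condensed, the contract that localPrevAlloc, remotePrevAlloc, and NoopPrevAlloc all satisfy after the revert reads as follows; this is an illustrative restatement with doc comments elided, not a new declaration in the patch:

package watchersketch

import (
	"context"

	"github.com/hashicorp/nomad/client/allocdir"
)

type prevAllocMigrator interface {
	Wait(ctx context.Context) error // block until the previous alloc terminates
	IsWaiting() bool
	IsMigrating() bool
	Migrate(ctx context.Context, dest *allocdir.AllocDir) error // copy sticky data
}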
-func (NoopPrevAlloc) Migrate(context.Context, allocdir.Interface) error { return nil } +func (NoopPrevAlloc) Migrate(context.Context, *allocdir.AllocDir) error { return nil } func (NoopPrevAlloc) IsWaiting() bool { return false } func (NoopPrevAlloc) IsMigrating() bool { return false } diff --git a/client/allocwatcher/alloc_watcher_test.go b/client/allocwatcher/alloc_watcher_test.go index d22dc8fef2a3..74e6f1a56108 100644 --- a/client/allocwatcher/alloc_watcher_test.go +++ b/client/allocwatcher/alloc_watcher_test.go @@ -29,7 +29,7 @@ import ( // fakeAllocRunner implements AllocRunnerMeta type fakeAllocRunner struct { alloc *structs.Allocation - AllocDir allocdir.Interface + AllocDir *allocdir.AllocDir Broadcaster *cstructs.AllocBroadcaster } @@ -44,12 +44,12 @@ func newFakeAllocRunner(t *testing.T, logger hclog.Logger) *fakeAllocRunner { return &fakeAllocRunner{ alloc: alloc, - AllocDir: allocdir.NewAllocDir(logger, path, path, alloc.ID), + AllocDir: allocdir.NewAllocDir(logger, path, alloc.ID), Broadcaster: cstructs.NewAllocBroadcaster(logger), } } -func (f *fakeAllocRunner) GetAllocDir() allocdir.Interface { +func (f *fakeAllocRunner) GetAllocDir() *allocdir.AllocDir { return f.AllocDir } diff --git a/client/client.go b/client/client.go index a82189abeafd..8f041bc41f60 100644 --- a/client/client.go +++ b/client/client.go @@ -56,7 +56,6 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/helper/tlsutil" - "github.com/hashicorp/nomad/helper/users/dynamic" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" nconfig "github.com/hashicorp/nomad/nomad/structs/config" @@ -340,9 +339,6 @@ type Client struct { // widsigner signs workload identities widsigner widmgr.IdentitySigner - - // users is a pool of dynamic workload users - users dynamic.Pool } var ( @@ -475,12 +471,6 @@ func NewClient(cfg *config.Config, consulCatalog consul.CatalogAPI, consulProxie c.topology = numalib.NoImpl(ir.Topology) } - // Create the dynamic workload users pool - c.users = dynamic.New(&dynamic.PoolConfig{ - MinUGID: cfg.Users.MinDynamicUser, - MaxUGID: cfg.Users.MaxDynamicUser, - }) - // Create the cpu core partition manager c.partitions = cgroupslib.GetPartition( c.topology.UsableCores(), @@ -692,17 +682,10 @@ func (c *Client) init() error { c.stateDB = db - // Ensure the alloc mounts dir exists if we are configured with a custom path. - if conf.AllocMountsDir != "" { - if err := os.MkdirAll(conf.AllocMountsDir, 0o711); err != nil { - return fmt.Errorf("failed creating alloc mounts dir: %w", err) - } - } - - // Ensure the alloc dir exists if we are configured with a custom path. + // Ensure the alloc dir exists if we have one if conf.AllocDir != "" { - if err := os.MkdirAll(conf.AllocDir, 0o711); err != nil { - return fmt.Errorf("failed creating alloc dir: %w", err) + if err := os.MkdirAll(conf.AllocDir, 0711); err != nil { + return fmt.Errorf("failed creating alloc dir: %s", err) } } else { // Otherwise make a temp directory to use. 
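The two client.go init hunks around this point restore the single-alloc-dir path: create the configured directory with mode 0711, or fall back to a temp directory and Chmod it to 0711. The execute bit lets task users traverse into their own directories while the missing read bit keeps them from listing other allocations; the explicit Chmod matters because temp directories are created 0700 and MkdirAll's mode argument is masked by the process umask. A condensed sketch; ensureAllocDir is an illustrative helper, not a function in client.go:

package clientsketch

import (
	"fmt"
	"os"
)

// ensureAllocDir makes sure the directory exists, then guarantees mode 0711
// regardless of umask or how the directory was first created.
func ensureAllocDir(dir string) error {
	if err := os.MkdirAll(dir, 0711); err != nil {
		return fmt.Errorf("failed creating alloc dir: %s", err)
	}
	if err := os.Chmod(dir, 0711); err != nil {
		return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err)
	}
	return nil
}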
@@ -717,13 +700,12 @@ func (c *Client) init() error { } // Change the permissions to have the execute bit - if err := os.Chmod(p, 0o711); err != nil { + if err := os.Chmod(p, 0711); err != nil { return fmt.Errorf("failed to change directory permissions for the AllocDir: %v", err) } conf = c.UpdateConfig(func(c *config.Config) { c.AllocDir = p - c.AllocMountsDir = p }) } @@ -2686,7 +2668,7 @@ func (c *Client) updateAlloc(update *structs.Allocation) { // Reconnect unknown allocations if they were updated and are not terminal. reconnect := update.ClientStatus == structs.AllocClientStatusUnknown && update.AllocModifyIndex > alloc.AllocModifyIndex && - (!update.ServerTerminalStatus() || !alloc.PreventRescheduleOnDisconnect()) + (!update.ServerTerminalStatus() || !alloc.PreventRescheduleOnLost()) if reconnect { err = ar.Reconnect(update) if err != nil { @@ -2790,7 +2772,6 @@ func (c *Client) newAllocRunnerConfig( WIDSigner: c.widsigner, Wranglers: c.wranglers, Partitions: c.partitions, - Users: c.users, } } diff --git a/client/client_interface_test.go b/client/client_interface_test.go index 7861bb357f65..852c9e5fe7a5 100644 --- a/client/client_interface_test.go +++ b/client/client_interface_test.go @@ -151,7 +151,7 @@ func (ar *emptyAllocRunner) GetTaskDriverCapabilities(taskName string) (*drivers func (ar *emptyAllocRunner) StatsReporter() interfaces.AllocStatsReporter { return ar } func (ar *emptyAllocRunner) Listener() *cstructs.AllocListener { return nil } -func (ar *emptyAllocRunner) GetAllocDir() allocdir.Interface { return nil } +func (ar *emptyAllocRunner) GetAllocDir() *allocdir.AllocDir { return nil } // LatestAllocStats lets this empty runner implement AllocStatsReporter func (ar *emptyAllocRunner) LatestAllocStats(taskFilter string) (*cstructs.AllocResourceUsage, error) { diff --git a/client/client_test.go b/client/client_test.go index 929140042087..d001dde32e9e 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -5,7 +5,6 @@ package client import ( "fmt" - "io/fs" "net" "os" "path/filepath" @@ -63,34 +62,6 @@ func TestClient_StartStop(t *testing.T) { } } -func TestClient_alloc_dirs(t *testing.T) { - ci.Parallel(t) - - parent := t.TempDir() - allocs := filepath.Join(parent, "allocs") - mounts := filepath.Join(parent, "mounts") - - client, cleanup := TestClient(t, func(c *config.Config) { - c.AllocDir = allocs - c.AllocMountsDir = mounts - }) - defer cleanup() - - t.Cleanup(func() { - test.NoError(t, client.Shutdown()) - }) - - // assert existence and permissions of alloc-dir - fi, err := os.Stat(allocs) - must.NoError(t, err) - must.Eq(t, 0o711|fs.ModeDir, fi.Mode()) - - // assert existence and permissions of alloc-mounts-dir - fi, err = os.Stat(allocs) - must.NoError(t, err) - must.Eq(t, 0o711|fs.ModeDir, fi.Mode()) -} - // Certain labels for metrics are dependant on client initial setup. 
This tests // that the client has properly initialized before we assign values to labels func TestClient_BaseLabels(t *testing.T) { diff --git a/client/config/arconfig.go b/client/config/arconfig.go index 3ce09f0c997f..4a58f9fd6936 100644 --- a/client/config/arconfig.go +++ b/client/config/arconfig.go @@ -21,7 +21,6 @@ import ( cstate "github.com/hashicorp/nomad/client/state" "github.com/hashicorp/nomad/client/vaultclient" "github.com/hashicorp/nomad/client/widmgr" - "github.com/hashicorp/nomad/helper/users/dynamic" "github.com/hashicorp/nomad/nomad/structs" ) @@ -120,9 +119,6 @@ type AllocRunnerConfig struct { // WIDMgr manages workload identities WIDMgr widmgr.IdentityManager - - // Users manages a pool of dynamic workload users - Users dynamic.Pool } // PrevAllocWatcher allows AllocRunners to wait for a previous allocation to @@ -145,5 +141,5 @@ type PrevAllocMigrator interface { IsMigrating() bool // Migrate data from previous alloc - Migrate(ctx context.Context, dest allocdir.Interface) error + Migrate(ctx context.Context, dest *allocdir.AllocDir) error } diff --git a/client/config/config.go b/client/config/config.go index 13db700b5bcf..ec2fad387484 100644 --- a/client/config/config.go +++ b/client/config/config.go @@ -97,16 +97,8 @@ type Config struct { StateDir string // AllocDir is where we store data for allocations - // - // In a production environment this should be owned by root with file - // mode 0o700. AllocDir string - // AllocMountsDir is where we bind mount paths from AllocDir for tasks making - // use of the unveil file isolation mode. In a production environment this - // should be owned by root with file mode 0o755. - AllocMountsDir string - // Logger provides a logger to the client Logger log.InterceptLogger @@ -336,9 +328,6 @@ type Config struct { // Drain configuration from the agent's config file. Drain *DrainConfig - // Uesrs configuration from the agent's config file. - Users *UsersConfig - // ExtraAllocHooks are run with other allocation hooks, mainly for testing. 
ExtraAllocHooks []interfaces.RunnerHook } @@ -834,7 +823,6 @@ func (c *Config) Copy() *Config { nc.TemplateConfig = c.TemplateConfig.Copy() nc.ReservableCores = slices.Clone(c.ReservableCores) nc.Artifact = c.Artifact.Copy() - nc.Users = c.Users.Copy() return &nc } @@ -865,10 +853,6 @@ func DefaultConfig() *Config { CgroupParent: "nomad.slice", // SETH todo MaxDynamicPort: structs.DefaultMinDynamicPort, MinDynamicPort: structs.DefaultMaxDynamicPort, - Users: &UsersConfig{ - MinDynamicUser: 80_000, - MaxDynamicUser: 89_999, - }, } return cfg diff --git a/client/config/config_test.go b/client/config/config_test.go index ce24de39f96b..83218b5ba66f 100644 --- a/client/config/config_test.go +++ b/client/config/config_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/consul-template/config" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestConfigRead(t *testing.T) { @@ -19,12 +19,16 @@ func TestConfigRead(t *testing.T) { config := Config{} actual := config.Read("cake") - must.Eq(t, "", actual) + if actual != "" { + t.Errorf("Expected empty string; found %s", actual) + } expected := "chocolate" config.Options = map[string]string{"cake": expected} actual = config.Read("cake") - must.Eq(t, expected, actual) + if actual != expected { + t.Errorf("Expected %s, found %s", expected, actual) + } } func TestConfigReadDefault(t *testing.T) { @@ -34,12 +38,16 @@ func TestConfigReadDefault(t *testing.T) { expected := "vanilla" actual := config.ReadDefault("cake", expected) - must.Eq(t, expected, actual) + if actual != expected { + t.Errorf("Expected %s, found %s", expected, actual) + } expected = "chocolate" config.Options = map[string]string{"cake": expected} actual = config.ReadDefault("cake", "vanilla") - must.Eq(t, expected, actual) + if actual != expected { + t.Errorf("Expected %s, found %s", expected, actual) + } } func mockWaitConfig() *WaitConfig { @@ -85,9 +93,13 @@ func TestWaitConfig_Copy(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Equal(t, tc.Expected, tc.Wait.Copy()) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + result := _case.Expected.Equal(_case.Wait.Copy()) + if !result { + t.Logf("\nExpected %v\n Found %v", _case.Expected, result) + } + require.True(t, result) }) } } @@ -119,9 +131,9 @@ func TestWaitConfig_IsEmpty(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Eq(t, tc.Expected, tc.Wait.IsEmpty()) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + require.Equal(t, _case.Expected, _case.Wait.IsEmpty()) }) } } @@ -164,9 +176,9 @@ func TestWaitConfig_IsEqual(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Eq(t, tc.Expected, tc.Wait.Equal(tc.Other)) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + require.Equal(t, _case.Expected, _case.Wait.Equal(_case.Other)) }) } } @@ -214,13 +226,13 @@ func TestWaitConfig_IsValid(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - err := tc.Retry.Validate() - if tc.Expected == "" { - must.NoError(t, err) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + if _case.Expected == "" { + require.Nil(t, _case.Retry.Validate()) } else { - must.ErrorContains(t, err, tc.Expected) + err := _case.Retry.Validate() + require.Contains(t, err.Error(), _case.Expected) } }) } @@ 
-273,10 +285,14 @@ func TestWaitConfig_Merge(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - merged := tc.Target.Merge(tc.Other) - must.Equal(t, tc.Expected, merged) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + merged := _case.Target.Merge(_case.Other) + result := _case.Expected.Equal(merged) + if !result { + t.Logf("\nExpected %v\n Found %v", _case.Expected, merged) + } + require.True(t, result) }) } } @@ -296,9 +312,9 @@ func TestWaitConfig_ToConsulTemplate(t *testing.T) { } actual, err := clientWaitConfig.ToConsulTemplate() - must.NoError(t, err) - must.Eq(t, *expected.Min, *actual.Min) - must.Eq(t, *expected.Max, *actual.Max) + require.NoError(t, err) + require.Equal(t, *expected.Min, *actual.Min) + require.Equal(t, *expected.Max, *actual.Max) } func mockRetryConfig() *RetryConfig { @@ -376,9 +392,13 @@ func TestRetryConfig_Copy(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Equal(t, tc.Expected, tc.Retry.Copy()) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + result := _case.Expected.Equal(_case.Retry.Copy()) + if !result { + t.Logf("\nExpected %v\n Found %v", _case.Expected, result) + } + require.True(t, result) }) } } @@ -410,9 +430,9 @@ func TestRetryConfig_IsEmpty(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Eq(t, tc.Expected, tc.Retry.IsEmpty()) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + require.Equal(t, _case.Expected, _case.Retry.IsEmpty()) }) } } @@ -500,9 +520,9 @@ func TestRetryConfig_IsEqual(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - must.Eq(t, tc.Expected, tc.Retry.Equal(tc.Other)) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + require.Equal(t, _case.Expected, _case.Retry.Equal(_case.Other)) }) } } @@ -565,13 +585,13 @@ func TestRetryConfig_IsValid(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - err := tc.Retry.Validate() - if tc.Expected == "" { - must.NoError(t, err) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + if _case.Expected == "" { + require.Nil(t, _case.Retry.Validate()) } else { - must.ErrorContains(t, err, tc.Expected) + err := _case.Retry.Validate() + require.Contains(t, err.Error(), _case.Expected) } }) } @@ -642,10 +662,14 @@ func TestRetryConfig_Merge(t *testing.T) { }, } - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - merged := tc.Target.Merge(tc.Other) - must.Equal(t, tc.Expected, merged) + for _, _case := range cases { + t.Run(_case.Name, func(t *testing.T) { + merged := _case.Target.Merge(_case.Other) + result := _case.Expected.Equal(merged) + if !result { + t.Logf("\nExpected %v\n Found %v", _case.Expected, merged) + } + require.True(t, result) }) } } @@ -661,7 +685,8 @@ func TestRetryConfig_ToConsulTemplate(t *testing.T) { } actual := mockRetryConfig() - must.Eq(t, *expected.Attempts, *actual.Attempts) - must.Eq(t, *expected.Backoff, *actual.Backoff) - must.Eq(t, *expected.MaxBackoff, *actual.MaxBackoff) + + require.Equal(t, *expected.Attempts, *actual.Attempts) + require.Equal(t, *expected.Backoff, *actual.Backoff) + require.Equal(t, *expected.MaxBackoff, *actual.MaxBackoff) } diff --git a/client/config/users.go b/client/config/users.go deleted file mode 100644 index a8275e24f211..000000000000 --- a/client/config/users.go +++ /dev/null @@ -1,32 +0,0 @@ -// 
Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package config - -import sconfig "github.com/hashicorp/nomad/nomad/structs/config" - -// UsersConfig configures things related to operating system users. -type UsersConfig struct { - // MinDynamicUser is the lowest uid/gid for use in the dynamic users pool. - MinDynamicUser int - - // MaxDynamicUser is the highest uid/gid for use in the dynamic users pool. - MaxDynamicUser int -} - -func UsersConfigFromAgent(c *sconfig.UsersConfig) *UsersConfig { - return &UsersConfig{ - MinDynamicUser: *c.MinDynamicUser, - MaxDynamicUser: *c.MaxDynamicUser, - } -} - -func (u *UsersConfig) Copy() *UsersConfig { - if u == nil { - return nil - } - return &UsersConfig{ - MinDynamicUser: u.MinDynamicUser, - MaxDynamicUser: u.MaxDynamicUser, - } -} diff --git a/client/config/users_test.go b/client/config/users_test.go deleted file mode 100644 index 25d6a8ded9da..000000000000 --- a/client/config/users_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package config - -import ( - "testing" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/nomad/structs/config" - "github.com/shoenig/test/must" -) - -func TestUsersConfigFromAgent(t *testing.T) { - ci.Parallel(t) - - cases := []struct { - name string - config *config.UsersConfig - exp *UsersConfig - }{ - { - name: "from default", - config: config.DefaultUsersConfig(), - exp: &UsersConfig{ - MinDynamicUser: 80_000, - MaxDynamicUser: 89_999, - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - got := UsersConfigFromAgent(tc.config) - must.Eq(t, tc.exp, got) - }) - } -} - -func TestUsersConfig_Copy(t *testing.T) { - ci.Parallel(t) - - orig := &UsersConfig{ - MinDynamicUser: 70100, - MaxDynamicUser: 70200, - } - - configCopy := orig.Copy() - must.Eq(t, orig, configCopy) - - // modify copy and make sure original does not change - configCopy.MinDynamicUser = 100 - configCopy.MaxDynamicUser = 200 - - must.Eq(t, &UsersConfig{ - MinDynamicUser: 70100, - MaxDynamicUser: 70200, - }, orig) -} diff --git a/client/fs_endpoint.go b/client/fs_endpoint.go index 33f0cdb085b7..240a73c4bb50 100644 --- a/client/fs_endpoint.go +++ b/client/fs_endpoint.go @@ -19,7 +19,7 @@ import ( "time" metrics "github.com/armon/go-metrics" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hpcloud/tail/watch" "github.com/hashicorp/nomad/acl" diff --git a/client/fs_endpoint_test.go b/client/fs_endpoint_test.go index 670193e98575..5ebe1930e7d7 100644 --- a/client/fs_endpoint_test.go +++ b/client/fs_endpoint_test.go @@ -19,7 +19,7 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" @@ -43,7 +43,7 @@ func tempAllocDir(t testing.TB) *allocdir.AllocDir { require.NoError(t, os.Chmod(dir, 0o777)) - return allocdir.NewAllocDir(testlog.HCLogger(t), dir, dir, "test_allocid") + return allocdir.NewAllocDir(testlog.HCLogger(t), dir, "test_allocid") } type nopWriteCloser struct { diff --git a/client/heartbeatstop.go b/client/heartbeatstop.go index de3856786102..1d6ba61abf33 100644 --- a/client/heartbeatstop.go +++ b/client/heartbeatstop.go @@ -47,7 +47,7 @@ func newHeartbeatStop( // allocation to be stopped if the taskgroup is configured appropriately func (h *heartbeatStop) allocHook(alloc *structs.Allocation) { tg := 
allocTaskGroup(alloc) - if tg.GetDisconnectStopTimeout() != nil { + if tg.StopAfterClientDisconnect != nil { h.allocHookCh <- alloc } } @@ -56,9 +56,8 @@ func (h *heartbeatStop) allocHook(alloc *structs.Allocation) { // past that it should be prevented from restarting func (h *heartbeatStop) shouldStop(alloc *structs.Allocation) bool { tg := allocTaskGroup(alloc) - timeout := tg.GetDisconnectStopTimeout() - if timeout != nil { - return h.shouldStopAfter(time.Now(), *timeout) + if tg.StopAfterClientDisconnect != nil { + return h.shouldStopAfter(time.Now(), *tg.StopAfterClientDisconnect) } return false } @@ -104,9 +103,8 @@ func (h *heartbeatStop) watch() { case alloc := <-h.allocHookCh: tg := allocTaskGroup(alloc) - timeout := tg.GetDisconnectStopTimeout() - if timeout != nil { - h.allocInterval[alloc.ID] = *timeout + if tg.StopAfterClientDisconnect != nil { + h.allocInterval[alloc.ID] = *tg.StopAfterClientDisconnect } case <-timeout: diff --git a/client/heartbeatstop_test.go b/client/heartbeatstop_test.go index 99e904cee385..9de2eb07c3d6 100644 --- a/client/heartbeatstop_test.go +++ b/client/heartbeatstop_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestHeartbeatStop_allocHook(t *testing.T) { @@ -27,63 +27,6 @@ func TestHeartbeatStop_allocHook(t *testing.T) { }) defer cleanupC1() - // an allocation, with a tiny lease - d := 1 * time.Microsecond - alloc := &structs.Allocation{ - ID: uuid.Generate(), - TaskGroup: "foo", - Job: &structs.Job{ - TaskGroups: []*structs.TaskGroup{ - { - Name: "foo", - Disconnect: &structs.DisconnectStrategy{ - StopOnClientAfter: &d, - }, - }, - }, - }, - Resources: &structs.Resources{ - CPU: 100, - MemoryMB: 100, - DiskMB: 0, - }, - } - - // alloc added to heartbeatStop.allocs - err := client.addAlloc(alloc, "") - must.NoError(t, err) - testutil.WaitForResult(func() (bool, error) { - _, ok := client.heartbeatStop.allocInterval[alloc.ID] - return ok, nil - }, func(err error) { - must.NoError(t, err) - }) - - // the tiny lease causes the watch loop to destroy it - testutil.WaitForResult(func() (bool, error) { - _, ok := client.heartbeatStop.allocInterval[alloc.ID] - return !ok, nil - }, func(err error) { - must.NoError(t, err) - }) - - must.Nil(t, client.allocs[alloc.ID]) -} - -// Test using stop_after_client_disconnect, remove after its deprecated in favor -// of Disconnect.StopOnClientAfter introduced in 1.8.0. 
-func TestHeartbeatStop_allocHook_Disconnect(t *testing.T) { - ci.Parallel(t) - - server, _, cleanupS1 := testServer(t, nil) - defer cleanupS1() - testutil.WaitForLeader(t, server.RPC) - - client, cleanupC1 := TestClient(t, func(c *config.Config) { - c.RPCHandler = server - }) - defer cleanupC1() - // an allocation, with a tiny lease d := 1 * time.Microsecond alloc := &structs.Allocation{ @@ -106,12 +49,12 @@ func TestHeartbeatStop_allocHook_Disconnect(t *testing.T) { // alloc added to heartbeatStop.allocs err := client.addAlloc(alloc, "") - must.NoError(t, err) + require.NoError(t, err) testutil.WaitForResult(func() (bool, error) { _, ok := client.heartbeatStop.allocInterval[alloc.ID] return ok, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) // the tiny lease causes the watch loop to destroy it @@ -119,8 +62,8 @@ func TestHeartbeatStop_allocHook_Disconnect(t *testing.T) { _, ok := client.heartbeatStop.allocInterval[alloc.ID] return !ok, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) - must.Nil(t, client.allocs[alloc.ID]) + require.Empty(t, client.allocs[alloc.ID]) } diff --git a/client/logmon/logging/rotator_test.go b/client/logmon/logging/rotator_test.go index ef91338aedf9..bd7365a5e5ca 100644 --- a/client/logmon/logging/rotator_test.go +++ b/client/logmon/logging/rotator_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/testutil" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" "go.uber.org/goleak" ) @@ -24,8 +24,8 @@ func TestFileRotator_IncorrectPath(t *testing.T) { defer goleak.VerifyNone(t) _, err := NewFileRotator("/foo", baseFileName, 10, 10, testlog.HCLogger(t)) - must.Error(t, err) - must.ErrorContains(t, err, "no such file or directory") + require.Error(t, err) + require.Contains(t, err.Error(), "no such file or directory") } func TestFileRotator_CreateNewFile(t *testing.T) { @@ -34,11 +34,11 @@ func TestFileRotator_CreateNewFile(t *testing.T) { path := t.TempDir() fr, err := NewFileRotator(path, baseFileName, 10, 10, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() _, err = os.Stat(filepath.Join(path, "redis.stdout.0")) - must.NoError(t, err) + require.NoError(t, err) } func TestFileRotator_OpenLastFile(t *testing.T) { @@ -50,18 +50,18 @@ func TestFileRotator_OpenLastFile(t *testing.T) { fname2 := filepath.Join(path, "redis.stdout.2") f1, err := os.Create(fname1) - must.NoError(t, err) + require.NoError(t, err) f1.Close() f2, err := os.Create(fname2) - must.NoError(t, err) + require.NoError(t, err) f2.Close() fr, err := NewFileRotator(path, baseFileName, 10, 10, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() - must.Eq(t, fname2, fr.currentFile.Name()) + require.Equal(t, fname2, fr.currentFile.Name()) } func TestFileRotator_WriteToCurrentFile(t *testing.T) { @@ -71,11 +71,11 @@ func TestFileRotator_WriteToCurrentFile(t *testing.T) { fname1 := filepath.Join(path, "redis.stdout.0") f1, err := os.Create(fname1) - must.NoError(t, err) + require.NoError(t, err) f1.Close() fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() fr.Write([]byte("abcde")) @@ -92,7 +92,7 @@ func TestFileRotator_WriteToCurrentFile(t *testing.T) { return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) } @@ -102,13 +102,13 @@ func TestFileRotator_RotateFiles(t *testing.T) { 
path := t.TempDir() fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() str := "abcdefgh" nw, err := fr.Write([]byte(str)) - must.NoError(t, err) - must.Eq(t, len(str), nw) + require.NoError(t, err) + require.Equal(t, len(str), nw) testutil.WaitForResult(func() (bool, error) { fname1 := filepath.Join(path, "redis.stdout.0") @@ -135,7 +135,7 @@ func TestFileRotator_RotateFiles(t *testing.T) { return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) } @@ -145,7 +145,7 @@ func TestFileRotator_RotateFiles_Boundary(t *testing.T) { path := t.TempDir() fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() // We will write three times: @@ -162,8 +162,8 @@ func TestFileRotator_RotateFiles_Boundary(t *testing.T) { for _, str := range []string{"ab\ncdef\n", "1234567890", "\n"} { nw, err := fr.Write([]byte(str)) - must.NoError(t, err) - must.Eq(t, len(str), nw) + require.NoError(t, err) + require.Equal(t, len(str), nw) } testutil.WaitForResult(func() (bool, error) { @@ -181,7 +181,7 @@ func TestFileRotator_RotateFiles_Boundary(t *testing.T) { return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) } @@ -192,16 +192,16 @@ func TestFileRotator_WriteRemaining(t *testing.T) { fname1 := filepath.Join(path, "redis.stdout.0") err := os.WriteFile(fname1, []byte("abcd"), 0600) - must.NoError(t, err) + require.NoError(t, err) fr, err := NewFileRotator(path, baseFileName, 10, 5, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() str := "efghijkl" nw, err := fr.Write([]byte(str)) - must.NoError(t, err) - must.Eq(t, len(str), nw) + require.NoError(t, err) + require.Equal(t, len(str), nw) testutil.WaitForResult(func() (bool, error) { fi, err := os.Stat(fname1) @@ -240,7 +240,7 @@ func TestFileRotator_WriteRemaining(t *testing.T) { return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) } @@ -251,13 +251,13 @@ func TestFileRotator_PurgeOldFiles(t *testing.T) { path := t.TempDir() fr, err := NewFileRotator(path, baseFileName, 2, 2, testlog.HCLogger(t)) - must.NoError(t, err) + require.NoError(t, err) defer fr.Close() str := "abcdeghijklmn" nw, err := fr.Write([]byte(str)) - must.NoError(t, err) - must.Eq(t, len(str), nw) + require.NoError(t, err) + require.Equal(t, len(str), nw) testutil.WaitForResult(func() (bool, error) { f, err := os.ReadDir(path) @@ -271,7 +271,7 @@ func TestFileRotator_PurgeOldFiles(t *testing.T) { return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) } @@ -288,7 +288,7 @@ func benchmarkRotatorWithInputSize(size int, b *testing.B) { path := b.TempDir() fr, err := NewFileRotator(path, baseFileName, 5, 1024*1024, testlog.HCLogger(b)) - must.NoError(b, err) + require.NoError(b, err) defer fr.Close() b.ResetTimer() @@ -298,7 +298,7 @@ func benchmarkRotatorWithInputSize(size int, b *testing.B) { // Generate some input data := make([]byte, size) _, err := rand.Read(data) - must.NoError(b, err) + require.NoError(b, err) // Insert random new lines for i := 0; i < 100; i++ { @@ -308,6 +308,6 @@ func benchmarkRotatorWithInputSize(size int, b *testing.B) { // Write the data _, err = fr.Write(data) - must.NoError(b, err) + require.NoError(b, err) } } diff --git a/client/logmon/logmon_test.go b/client/logmon/logmon_test.go index 9242aee92d7a..8d7e54394e1a 100644 --- 
a/client/logmon/logmon_test.go
+++ b/client/logmon/logmon_test.go
@@ -16,12 +16,13 @@ import (
 	"github.com/hashicorp/nomad/helper/testlog"
 	"github.com/hashicorp/nomad/helper/uuid"
 	"github.com/hashicorp/nomad/testutil"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestLogmon_Start_rotate(t *testing.T) {
 	ci.Parallel(t)
+	require := require.New(t)
 
 	var stdoutFifoPath, stderrFifoPath string
 
 	dir := t.TempDir()
@@ -45,35 +46,35 @@ func TestLogmon_Start_rotate(t *testing.T) {
 	}
 
 	lm := NewLogMon(testlog.HCLogger(t))
-	must.NoError(t, lm.Start(cfg))
+	require.NoError(lm.Start(cfg))
 
 	stdout, err := fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	// Write enough bytes such that the log is rotated
 	bytes1MB := make([]byte, 1024*1024)
 	_, err = rand.Read(bytes1MB)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	_, err = stdout.Write(bytes1MB)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		_, err = os.Stat(filepath.Join(dir, "stdout.0"))
 		return err == nil, err
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 
 	testutil.WaitForResult(func() (bool, error) {
 		_, err = os.Stat(filepath.Join(dir, "stdout.1"))
 		return err == nil, err
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 
 	_, err = os.Stat(filepath.Join(dir, "stdout.2"))
-	must.Error(t, err)
-	must.NoError(t, lm.Stop())
-	must.NoError(t, lm.Stop())
+	require.Error(err)
+	require.NoError(lm.Stop())
+	require.NoError(lm.Stop())
 }
 
 // asserts that calling Start twice restarts the log rotator and that any logs
@@ -85,6 +86,7 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 		t.Skip("windows does not support pushing data to a pipe with no servers")
 	}
 
+	require := require.New(t)
 	var stdoutFifoPath, stderrFifoPath string
 
 	dir := t.TempDir()
@@ -109,17 +111,17 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 
 	lm := NewLogMon(testlog.HCLogger(t))
 	impl, ok := lm.(*logmonImpl)
-	must.True(t, ok)
-	must.NoError(t, lm.Start(cfg))
+	require.True(ok)
+	require.NoError(lm.Start(cfg))
 
 	stdout, err := fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	stderr, err := fifo.OpenWriter(stderrFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	// Write a string and assert it was written to the file
 	_, err = stdout.Write([]byte("test\n"))
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		raw, err := os.ReadFile(filepath.Join(dir, "stdout.0"))
@@ -128,27 +130,27 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 		}
 		return "test\n" == string(raw), fmt.Errorf("unexpected stdout %q", string(raw))
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
-	must.True(t, impl.tl.IsRunning())
+	require.True(impl.tl.IsRunning())
 
 	// Close stdout and assert that logmon no longer writes to the file
-	must.NoError(t, stdout.Close())
-	must.NoError(t, stderr.Close())
+	require.NoError(stdout.Close())
+	require.NoError(stderr.Close())
 
 	testutil.WaitForResult(func() (bool, error) {
 		return !impl.tl.IsRunning(), fmt.Errorf("logmon is still running")
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 
 	stdout, err = fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	stderr, err = fifo.OpenWriter(stderrFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	_, err = stdout.Write([]byte("te"))
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		raw, err := os.ReadFile(filepath.Join(dir, "stdout.0"))
@@ -157,19 +159,19 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 		}
 		return "test\n" == string(raw), fmt.Errorf("unexpected stdout %q", string(raw))
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 
 	// Start logmon again and assert that it appended to the file
-	must.NoError(t, lm.Start(cfg))
+	require.NoError(lm.Start(cfg))
 
 	stdout, err = fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	stderr, err = fifo.OpenWriter(stderrFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	_, err = stdout.Write([]byte("st\n"))
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		raw, err := os.ReadFile(filepath.Join(dir, "stdout.0"))
 		if err != nil {
@@ -179,7 +181,7 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 		expected := "test\ntest\n" == string(raw)
 		return expected, fmt.Errorf("unexpected stdout %q", string(raw))
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 }
 
@@ -187,6 +189,7 @@ func TestLogmon_Start_restart_flusheslogs(t *testing.T) {
 func TestLogmon_Start_restart(t *testing.T) {
 	ci.Parallel(t)
+	require := require.New(t)
 
 	var stdoutFifoPath, stderrFifoPath string
 
 	dir := t.TempDir()
@@ -211,20 +214,20 @@ func TestLogmon_Start_restart(t *testing.T) {
 
 	lm := NewLogMon(testlog.HCLogger(t))
 	impl, ok := lm.(*logmonImpl)
-	must.True(t, ok)
-	must.NoError(t, lm.Start(cfg))
+	require.True(ok)
+	require.NoError(lm.Start(cfg))
 	t.Cleanup(func() {
-		must.NoError(t, lm.Stop())
+		require.NoError(lm.Stop())
 	})
 
 	stdout, err := fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	stderr, err := fifo.OpenWriter(stderrFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 
 	// Write a string and assert it was written to the file
 	_, err = stdout.Write([]byte("test\n"))
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		raw, err := os.ReadFile(filepath.Join(dir, "stdout.0"))
@@ -233,37 +236,37 @@ func TestLogmon_Start_restart(t *testing.T) {
 		}
 		return "test\n" == string(raw), fmt.Errorf("unexpected stdout %q", string(raw))
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
-	must.True(t, impl.tl.IsRunning())
+	require.True(impl.tl.IsRunning())
 
 	// Close stderr and assert that logmon no longer writes to the file
 	// Keep stdout open to ensure that IsRunning requires both
-	must.NoError(t, stderr.Close())
+	require.NoError(stderr.Close())
 
 	testutil.WaitForResult(func() (bool, error) {
 		return !impl.tl.IsRunning(), fmt.Errorf("logmon is still running")
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 
 	// Start logmon again and assert that it can receive logs again
-	must.NoError(t, lm.Start(cfg))
+	require.NoError(lm.Start(cfg))
 
 	stdout, err = fifo.OpenWriter(stdoutFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	t.Cleanup(func() {
-		must.NoError(t, stdout.Close())
+		require.NoError(stdout.Close())
 	})
 	stderr, err = fifo.OpenWriter(stderrFifoPath)
-	must.NoError(t, err)
+	require.NoError(err)
 	t.Cleanup(func() {
-		must.NoError(t, stderr.Close())
+		require.NoError(stderr.Close())
 	})
 
 	_, err = stdout.Write([]byte("test\n"))
-	must.NoError(t, err)
+	require.NoError(err)
 
 	testutil.WaitForResult(func() (bool, error) {
 		raw, err := os.ReadFile(filepath.Join(dir, "stdout.0"))
 		if err != nil {
@@ -273,7 +276,7 @@ func TestLogmon_Start_restart(t *testing.T) {
 		expected := "test\ntest\n" == string(raw)
 		return expected, fmt.Errorf("unexpected stdout %q", string(raw))
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(err)
 	})
 }
 
@@ -301,6 +304,6 @@ func TestLogmon_NewError(t *testing.T) {
 	rotator := panicWriter{}
 
 	w, err := newLogRotatorWrapper(path, logger, rotator)
-	must.Error(t, err)
-	must.Nil(t, w)
+	require.Error(t, err)
+	require.Nil(t, w)
 }
diff --git a/client/rpc.go b/client/rpc.go
index 6bbbe27f88cc..2482a9f68d28 100644
--- a/client/rpc.go
+++ b/client/rpc.go
@@ -12,7 +12,7 @@ import (
 	"time"
 
 	metrics "github.com/armon/go-metrics"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/client/servers"
 	"github.com/hashicorp/nomad/helper"
 	inmem "github.com/hashicorp/nomad/helper/codec"
diff --git a/client/state/upgrade.go b/client/state/upgrade.go
index 6cf5aab7708b..5485e47eb3cb 100644
--- a/client/state/upgrade.go
+++ b/client/state/upgrade.go
@@ -10,7 +10,7 @@ import (
 	"os"
 
 	hclog "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/client/dynamicplugins"
 	"github.com/hashicorp/nomad/helper/boltdd"
 	"github.com/hashicorp/nomad/nomad/structs"
diff --git a/client/structs/structs.go b/client/structs/structs.go
index 4b2fd8fe191d..013dae0bb7b7 100644
--- a/client/structs/structs.go
+++ b/client/structs/structs.go
@@ -3,7 +3,7 @@
 
 package structs
 
-//go:generate codecgen -c github.com/hashicorp/go-msgpack/v2/codec -st codec -d 102 -t codegen_generated -o structs.generated.go structs.go
+//go:generate codecgen -c github.com/hashicorp/go-msgpack/codec -st codec -d 102 -t codegen_generated -o structs.generated.go structs.go
 
 import (
 	"errors"
diff --git a/client/testutil/rpc.go b/client/testutil/rpc.go
index 9627625be14c..e854129d11b4 100644
--- a/client/testutil/rpc.go
+++ b/client/testutil/rpc.go
@@ -11,7 +11,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/stretchr/testify/require"
diff --git a/command/acl_binding_rule_update_test.go b/command/acl_binding_rule_update_test.go
index 7b0366ee4010..ffc4b7a0cb8d 100644
--- a/command/acl_binding_rule_update_test.go
+++ b/command/acl_binding_rule_update_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLBindingRuleUpdateCommand_Run(t *testing.T) {
@@ -28,7 +29,7 @@ func TestACLBindingRuleUpdateCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLBindingRuleUpdateCommand{
diff --git a/command/acl_bootstrap_test.go b/command/acl_bootstrap_test.go
index 85edc5fd14aa..404341f2cc83 100644
--- a/command/acl_bootstrap_test.go
+++ b/command/acl_bootstrap_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLBootstrapCommand(t *testing.T) {
@@ -36,7 +37,7 @@ func TestACLBootstrapCommand(t *testing.T) {
 
 	out := ui.OutputWriter.String()
 	must.StrContains(t, out, "Secret ID")
-	must.StrContains(t, out, "Expiry Time = ")
+	require.Contains(t, out, "Expiry Time = ")
 }
 
 // If a bootstrap token has already been created, attempts to create more should
@@ -114,7 +115,7 @@ func TestACLBootstrapCommand_WithOperatorFileBootstrapToken(t *testing.T) {
 
 	out := ui.OutputWriter.String()
 	must.StrContains(t, out, mockToken.SecretID)
-	must.StrContains(t, out, "Expiry Time = ")
+	require.Contains(t, out, "Expiry Time = ")
 }
 
 // Attempting to bootstrap the server with an invalid operator provided token in a file should
diff --git a/command/acl_role_create_test.go b/command/acl_role_create_test.go
index 7221092c3bba..c002ece79454 100644
--- a/command/acl_role_create_test.go
+++ b/command/acl_role_create_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLRoleCreateCommand_Run(t *testing.T) {
@@ -26,7 +26,7 @@ func TestACLRoleCreateCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLRoleCreateCommand{
@@ -37,20 +37,20 @@ func TestACLRoleCreateCommand_Run(t *testing.T) {
 	}
 
 	// Test the basic validation on the command.
-	must.One(t, cmd.Run([]string{"-address=" + url, "this-command-does-not-take-args"}))
-	must.StrContains(t, ui.ErrorWriter.String(), "This command takes no arguments")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "this-command-does-not-take-args"}))
+	require.Contains(t, ui.ErrorWriter.String(), "This command takes no arguments")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
-	must.One(t, cmd.Run([]string{"-address=" + url}))
-	must.StrContains(t, ui.ErrorWriter.String(), "ACL role name must be specified using the -name flag")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url}))
+	require.Contains(t, ui.ErrorWriter.String(), "ACL role name must be specified using the -name flag")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
-	must.One(t, cmd.Run([]string{"-address=" + url, `-name="foobar"`}))
-	must.StrContains(t, ui.ErrorWriter.String(), "At least one policy name must be specified using the -policy flag")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url, `-name="foobar"`}))
+	require.Contains(t, ui.ErrorWriter.String(), "At least one policy name must be specified using the -policy flag")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -65,18 +65,18 @@ func TestACLRoleCreateCommand_Run(t *testing.T) {
 	}
 	err := srv.Agent.Server().State().UpsertACLPolicies(
 		structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{&aclPolicy})
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Create an ACL role.
 	args := []string{
 		"-address=" + url, "-token=" + rootACLToken.SecretID, "-name=acl-role-cli-test",
 		"-policy=acl-role-cli-test-policy", "-description=acl-role-all-the-things",
 	}
-	must.Zero(t, cmd.Run(args))
+	require.Equal(t, 0, cmd.Run(args))
 
 	s := ui.OutputWriter.String()
-	must.StrContains(t, s, "Name = acl-role-cli-test")
-	must.StrContains(t, s, "Description = acl-role-all-the-things")
-	must.StrContains(t, s, "Policies = acl-role-cli-test-policy")
+	require.Contains(t, s, "Name = acl-role-cli-test")
+	require.Contains(t, s, "Description = acl-role-all-the-things")
+	require.Contains(t, s, "Policies = acl-role-cli-test-policy")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
diff --git a/command/acl_role_delete_test.go b/command/acl_role_delete_test.go
index ea115807e801..1c7a7161cc8b 100644
--- a/command/acl_role_delete_test.go
+++ b/command/acl_role_delete_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLRoleDeleteCommand_Run(t *testing.T) {
@@ -27,7 +27,7 @@ func TestACLRoleDeleteCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLRoleDeleteCommand{
@@ -39,15 +39,15 @@ func TestACLRoleDeleteCommand_Run(t *testing.T) {
 
 	// Try and delete more than one ACL role.
 	code := cmd.Run([]string{"-address=" + url, "acl-role-1", "acl-role-2"})
-	must.One(t, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "This command takes one argument")
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "This command takes one argument")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	// Try deleting a role that does not exist.
-	must.One(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "acl-role-1"}))
-	must.StrContains(t, ui.ErrorWriter.String(), "ACL role not found")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "acl-role-1"}))
+	require.Contains(t, ui.ErrorWriter.String(), "ACL role not found")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -62,7 +62,7 @@ func TestACLRoleDeleteCommand_Run(t *testing.T) {
 	}
 	err := srv.Agent.Server().State().UpsertACLPolicies(
 		structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{&aclPolicy})
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Create an ACL role referencing the previously created policy.
 	aclRole := structs.ACLRole{
@@ -72,9 +72,9 @@ func TestACLRoleDeleteCommand_Run(t *testing.T) {
 	}
 	err = srv.Agent.Server().State().UpsertACLRoles(
 		structs.MsgTypeTestSetup, 20, []*structs.ACLRole{&aclRole}, false)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Delete the existing ACL role.
-	must.Zero(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, aclRole.ID}))
-	must.StrContains(t, ui.OutputWriter.String(), "successfully deleted")
+	require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, aclRole.ID}))
+	require.Contains(t, ui.OutputWriter.String(), "successfully deleted")
 }
diff --git a/command/acl_role_info_test.go b/command/acl_role_info_test.go
index ad8921a5cb63..309fb3eb4be0 100644
--- a/command/acl_role_info_test.go
+++ b/command/acl_role_info_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLRoleInfoCommand_Run(t *testing.T) {
@@ -28,7 +28,7 @@ func TestACLRoleInfoCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLRoleInfoCommand{
@@ -39,15 +39,15 @@ func TestACLRoleInfoCommand_Run(t *testing.T) {
 	}
 
 	// Perform a lookup without specifying an ID.
-	must.One(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
-	must.StrContains(t, ui.ErrorWriter.String(), "This command takes one argument: ")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
+	require.Contains(t, ui.ErrorWriter.String(), "This command takes one argument: ")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	// Perform a lookup specifying a random ID.
-	must.One(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, uuid.Generate()}))
-	must.StrContains(t, ui.ErrorWriter.String(), "ACL role not found")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, uuid.Generate()}))
+	require.Contains(t, ui.ErrorWriter.String(), "ACL role not found")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -62,7 +62,7 @@ func TestACLRoleInfoCommand_Run(t *testing.T) {
 	}
 	err := srv.Agent.Server().State().UpsertACLPolicies(
 		structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{&aclPolicy})
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Create an ACL role referencing the previously created policy.
 	aclRole := structs.ACLRole{
@@ -72,26 +72,26 @@ func TestACLRoleInfoCommand_Run(t *testing.T) {
 	}
 	err = srv.Agent.Server().State().UpsertACLRoles(
 		structs.MsgTypeTestSetup, 20, []*structs.ACLRole{&aclRole}, false)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Look up the ACL role using its ID.
-	must.Zero(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, aclRole.ID}))
+	require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, aclRole.ID}))
 
 	s := ui.OutputWriter.String()
-	must.StrContains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
-	must.StrContains(t, s, fmt.Sprintf("Name = %s", aclRole.Name))
-	must.StrContains(t, s, "Description = ")
-	must.StrContains(t, s, fmt.Sprintf("Policies = %s", aclPolicy.Name))
+	require.Contains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
+	require.Contains(t, s, fmt.Sprintf("Name = %s", aclRole.Name))
+	require.Contains(t, s, "Description = ")
+	require.Contains(t, s, fmt.Sprintf("Policies = %s", aclPolicy.Name))
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	// Look up the ACL role using its Name.
-	must.Zero(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "-by-name", aclRole.Name}))
+	require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "-by-name", aclRole.Name}))
 
 	s = ui.OutputWriter.String()
-	must.StrContains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
-	must.StrContains(t, s, fmt.Sprintf("Name = %s", aclRole.Name))
-	must.StrContains(t, s, "Description = ")
-	must.StrContains(t, s, fmt.Sprintf("Policies = %s", aclPolicy.Name))
+	require.Contains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
+	require.Contains(t, s, fmt.Sprintf("Name = %s", aclRole.Name))
+	require.Contains(t, s, "Description = ")
+	require.Contains(t, s, fmt.Sprintf("Policies = %s", aclPolicy.Name))
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
diff --git a/command/acl_role_list_test.go b/command/acl_role_list_test.go
index 3513aeb4d990..7d074dda4b4c 100644
--- a/command/acl_role_list_test.go
+++ b/command/acl_role_list_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLRoleListCommand_Run(t *testing.T) {
@@ -27,7 +27,7 @@ func TestACLRoleListCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLRoleListCommand{
@@ -38,8 +38,8 @@ func TestACLRoleListCommand_Run(t *testing.T) {
 	}
 
 	// Perform a list straight away without any roles held in state.
-	must.Zero(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
-	must.StrContains(t, ui.OutputWriter.String(), "No ACL roles found")
+	require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
+	require.Contains(t, ui.OutputWriter.String(), "No ACL roles found")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -54,7 +54,7 @@ func TestACLRoleListCommand_Run(t *testing.T) {
 	}
 	err := srv.Agent.Server().State().UpsertACLPolicies(
 		structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{&aclPolicy})
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Create an ACL role referencing the previously created policy.
 	aclRole := structs.ACLRole{
@@ -64,16 +64,16 @@ func TestACLRoleListCommand_Run(t *testing.T) {
 	}
 	err = srv.Agent.Server().State().UpsertACLRoles(
 		structs.MsgTypeTestSetup, 20, []*structs.ACLRole{&aclRole}, false)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Perform a listing to get the created role.
-	must.Zero(t, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
+	require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID}))
 
 	s := ui.OutputWriter.String()
-	must.StrContains(t, s, "ID")
-	must.StrContains(t, s, "Name")
-	must.StrContains(t, s, "Policies")
-	must.StrContains(t, s, "acl-role-cli-test")
-	must.StrContains(t, s, "acl-role-policy-cli-test")
+	require.Contains(t, s, "ID")
+	require.Contains(t, s, "Name")
+	require.Contains(t, s, "Policies")
+	require.Contains(t, s, "acl-role-cli-test")
+	require.Contains(t, s, "acl-role-policy-cli-test")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
diff --git a/command/acl_role_test.go b/command/acl_role_test.go
index e65ea78e1187..4a6a555a505c 100644
--- a/command/acl_role_test.go
+++ b/command/acl_role_test.go
@@ -7,7 +7,7 @@ import (
 	"testing"
 
 	"github.com/hashicorp/nomad/api"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func Test_formatACLRole(t *testing.T) {
@@ -26,7 +26,7 @@ func Test_formatACLRole(t *testing.T) {
 	}
 
 	expectedOutput := "ID = this-is-usually-a-uuid\nName = this-is-my-friendly-name\nDescription = this-is-my-friendly-name\nPolicies = policy-link-1,policy-link-2,policy-link-3,policy-link-4\nCreate Index = 13\nModify Index = 1313"
 	actualOutput := formatACLRole(&inputACLRole)
-	must.Eq(t, expectedOutput, actualOutput)
+	require.Equal(t, expectedOutput, actualOutput)
 }
 
 func Test_aclRolePolicyLinkToStringList(t *testing.T) {
@@ -43,7 +43,7 @@ func Test_aclRolePolicyLinkToStringList(t *testing.T) {
 		"z-policy-link-1",
 	}
 	actualOutput := aclRolePolicyLinkToStringList(inputPolicyLinks)
-	must.Eq(t, expectedOutput, actualOutput)
+	require.Equal(t, expectedOutput, actualOutput)
 }
 
 func Test_aclRolePolicyNamesToPolicyLinks(t *testing.T) {
@@ -60,5 +60,5 @@ func Test_aclRolePolicyNamesToPolicyLinks(t *testing.T) {
 		{Name: "policy-link-4"},
 	}
 	actualOutput := aclRolePolicyNamesToPolicyLinks(inputPolicyNames)
-	must.SliceContainsAll(t, expectedOutput, actualOutput)
+	require.ElementsMatch(t, expectedOutput, actualOutput)
 }
diff --git a/command/acl_role_update_test.go b/command/acl_role_update_test.go
index 394c3414e1ac..869f3c03357a 100644
--- a/command/acl_role_update_test.go
+++ b/command/acl_role_update_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLRoleUpdateCommand_Run(t *testing.T) {
@@ -28,7 +28,7 @@ func TestACLRoleUpdateCommand_Run(t *testing.T) {
 
 	// Wait for the server to start fully and ensure we have a bootstrap token.
 	testutil.WaitForLeader(t, srv.Agent.RPC)
 	rootACLToken := srv.RootToken
-	must.NotNil(t, rootACLToken)
+	require.NotNil(t, rootACLToken)
 
 	ui := cli.NewMockUi()
 	cmd := &ACLRoleUpdateCommand{
@@ -39,16 +39,16 @@ func TestACLRoleUpdateCommand_Run(t *testing.T) {
 	}
 
 	// Try calling the command without setting an ACL Role ID arg.
-	must.One(t, cmd.Run([]string{"-address=" + url}))
-	must.StrContains(t, ui.ErrorWriter.String(), "This command takes one argument")
+	require.Equal(t, 1, cmd.Run([]string{"-address=" + url}))
+	require.Contains(t, ui.ErrorWriter.String(), "This command takes one argument")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	// Try calling the command with an ACL role ID that does not exist.
 	code := cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "catch-me-if-you-can"})
-	must.One(t, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "ACL role not found")
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "ACL role not found")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -63,7 +63,7 @@ func TestACLRoleUpdateCommand_Run(t *testing.T) {
 	}
 	err := srv.Agent.Server().State().UpsertACLPolicies(
 		structs.MsgTypeTestSetup, 10, []*structs.ACLPolicy{&aclPolicy})
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Create an ACL role that can be used for updating.
 	aclRole := structs.ACLRole{
@@ -75,12 +75,12 @@ func TestACLRoleUpdateCommand_Run(t *testing.T) {
 
 	err = srv.Agent.Server().State().UpsertACLRoles(
 		structs.MsgTypeTestSetup, 20, []*structs.ACLRole{&aclRole}, false)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	// Try a merge update without setting any parameters to update.
 	code = cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, aclRole.ID})
-	must.One(t, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "Please provide at least one flag to update the ACL role")
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "Please provide at least one flag to update the ACL role")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -88,39 +88,39 @@ func TestACLRoleUpdateCommand_Run(t *testing.T) {
 	// Update the description using the merge method.
 	code = cmd.Run([]string{
 		"-address=" + url, "-token=" + rootACLToken.SecretID,
 		"-description=badger-badger-badger", aclRole.ID})
-	must.Zero(t, code)
+	require.Equal(t, 0, code)
 
 	s := ui.OutputWriter.String()
-	must.StrContains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
-	must.StrContains(t, s, "Name = acl-role-cli-test")
-	must.StrContains(t, s, "Description = badger-badger-badger")
-	must.StrContains(t, s, "Policies = acl-role-cli-test-policy")
+	require.Contains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
+	require.Contains(t, s, "Name = acl-role-cli-test")
+	require.Contains(t, s, "Description = badger-badger-badger")
+	require.Contains(t, s, "Policies = acl-role-cli-test-policy")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	// Try updating the role using no-merge without setting the required flags.
 	code = cmd.Run([]string{"-address=" + url, "-token=" + rootACLToken.SecretID, "-no-merge", aclRole.ID})
-	must.One(t, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "ACL role name must be specified using the -name flag")
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "ACL role name must be specified using the -name flag")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
 	code = cmd.Run([]string{
 		"-address=" + url, "-token=" + rootACLToken.SecretID,
 		"-no-merge", "-name=update-role-name", aclRole.ID})
-	must.One(t, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "At least one policy name must be specified using the -policy flag")
+	require.Equal(t, 1, code)
+	require.Contains(t, ui.ErrorWriter.String(), "At least one policy name must be specified using the -policy flag")
 
 	// Update the role using no-merge with all required flags set.
 	code = cmd.Run([]string{
 		"-address=" + url, "-token=" + rootACLToken.SecretID, "-no-merge",
 		"-name=update-role-name", "-description=updated-description",
 		"-policy=acl-role-cli-test-policy", aclRole.ID})
-	must.Zero(t, code)
+	require.Equal(t, 0, code)
 
 	s = ui.OutputWriter.String()
-	must.StrContains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
-	must.StrContains(t, s, "Name = update-role-name")
-	must.StrContains(t, s, "Description = updated-description")
-	must.StrContains(t, s, "Policies = acl-role-cli-test-policy")
+	require.Contains(t, s, fmt.Sprintf("ID = %s", aclRole.ID))
+	require.Contains(t, s, "Name = update-role-name")
+	require.Contains(t, s, "Description = updated-description")
+	require.Contains(t, s, "Policies = acl-role-cli-test-policy")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
diff --git a/command/acl_token_create_test.go b/command/acl_token_create_test.go
index 6afc140a1766..d7f657be9a3c 100644
--- a/command/acl_token_create_test.go
+++ b/command/acl_token_create_test.go
@@ -12,6 +12,7 @@ import (
 	"github.com/hashicorp/nomad/command/agent"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestACLTokenCreateCommand(t *testing.T) {
@@ -38,12 +39,12 @@ func TestACLTokenCreateCommand(t *testing.T) {
 	// Request to create a new token with a valid management token that does
 	// not have an expiry set.
 	code = cmd.Run([]string{"-address=" + url, "-token=" + token.SecretID, "-policy=foo", "-type=client"})
-	must.Zero(t, code)
+	require.Equal(t, 0, code)
 
 	// Check the output
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "[foo]")
-	must.StrContains(t, out, "Expiry Time = ")
+	require.Contains(t, out, "[foo]")
+	require.Contains(t, out, "Expiry Time = ")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -53,14 +54,14 @@ func TestACLTokenCreateCommand(t *testing.T) {
 	var jsonMap map[string]interface{}
 	for _, outputFormatFlag := range testCasesNoTTL {
 		code = cmd.Run([]string{"-address=" + url, "-token=" + token.SecretID, "-policy=foo", "-type=client", outputFormatFlag})
-		must.Zero(t, code)
+		require.Equal(t, 0, code)
 
 		// Check the output
 		out = ui.OutputWriter.String()
-		must.StrContains(t, out, "foo")
+		require.Contains(t, out, "foo")
 		if outputFormatFlag == "-json" {
 			err := json.Unmarshal([]byte(out), &jsonMap)
-			must.NoError(t, err)
+			require.Nil(t, err, "Output not in JSON format")
 		}
 
 		ui.OutputWriter.Reset()
@@ -69,10 +70,10 @@ func TestACLTokenCreateCommand(t *testing.T) {
 
 	// Create a new token that has an expiry TTL set and check the response.
 	code = cmd.Run([]string{"-address=" + url, "-token=" + token.SecretID, "-type=management", "-ttl=10m"})
-	must.Zero(t, code)
+	require.Equal(t, 0, code)
 
 	out = ui.OutputWriter.String()
-	must.StrNotContains(t, out, "Expiry Time = ")
+	require.NotContains(t, out, "Expiry Time = ")
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
 
@@ -80,15 +81,15 @@ func TestACLTokenCreateCommand(t *testing.T) {
 	testCasesWithTTL := [][]string{{"-json", "ExpirationTTL"}, {"-t='{{ .ExpirationTTL }}'", "10m0s"}}
 	for _, outputFormatFlag := range testCasesWithTTL {
 		code = cmd.Run([]string{"-address=" + url, "-token=" + token.SecretID, "-type=management", "-ttl=10m", outputFormatFlag[0]})
-		must.Zero(t, code)
+		require.Equal(t, 0, code)
 
 		// Check the output
 		out = ui.OutputWriter.String()
 		if outputFormatFlag[0] == "-json" {
 			err := json.Unmarshal([]byte(out), &jsonMap)
-			must.NoError(t, err)
+			require.Nil(t, err, "Output not in JSON format")
 		}
-		must.StrContains(t, out, outputFormatFlag[1])
+		require.Contains(t, out, outputFormatFlag[1])
 
 		ui.OutputWriter.Reset()
 		ui.ErrorWriter.Reset()
 	}
@@ -116,5 +117,5 @@ func Test_generateACLTokenRoleLinks(t *testing.T) {
 		{ID: "77a780d8-2dee-7c7f-7822-6f5471c5cbb2"},
 		{ID: "56850b06-a343-a772-1a5c-ad083fd8a50e"},
 	}
-	must.SliceContainsAll(t, generateACLTokenRoleLinks(inputRoleNames, inputRoleIDs), expectedOutput)
+	require.ElementsMatch(t, generateACLTokenRoleLinks(inputRoleNames, inputRoleIDs), expectedOutput)
 }
diff --git a/command/agent/agent.go b/command/agent/agent.go
index 9e589fd50fe0..3d7f8c441077 100644
--- a/command/agent/agent.go
+++ b/command/agent/agent.go
@@ -721,7 +721,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
 	if agentConfig.DataDir != "" {
 		conf.StateDir = filepath.Join(agentConfig.DataDir, "client")
 		conf.AllocDir = filepath.Join(agentConfig.DataDir, "alloc")
-		conf.AllocMountsDir = filepath.Join(agentConfig.DataDir, "mounts")
 	}
 	if agentConfig.Client.StateDir != "" {
 		conf.StateDir = agentConfig.Client.StateDir
@@ -729,9 +728,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
 	if agentConfig.Client.AllocDir != "" {
 		conf.AllocDir = agentConfig.Client.AllocDir
 	}
-	if agentConfig.Client.AllocMountsDir != "" {
-		conf.AllocMountsDir = agentConfig.Client.AllocMountsDir
-	}
 	if agentConfig.Client.NetworkInterface != "" {
 		conf.NetworkInterface = agentConfig.Client.NetworkInterface
 	}
@@ -906,8 +902,6 @@ func convertClientConfig(agentConfig *Config) (*clientconfig.Config, error) {
 	}
 	conf.Drain = drainConfig
 
-	conf.Users = clientconfig.UsersConfigFromAgent(agentConfig.Client.Users)
-
 	return conf, nil
 }
diff --git a/command/agent/agent_endpoint.go b/command/agent/agent_endpoint.go
index fc3259ac4548..8acd5aebfe49 100644
--- a/command/agent/agent_endpoint.go
+++ b/command/agent/agent_endpoint.go
@@ -18,7 +18,7 @@ import (
 	"github.com/docker/docker/pkg/ioutils"
 	log "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/api"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/command/agent/host"
diff --git a/command/agent/agent_endpoint_test.go b/command/agent/agent_endpoint_test.go
index e5972ebdc9b2..8320ed72d891 100644
--- a/command/agent/agent_endpoint_test.go
+++ b/command/agent/agent_endpoint_test.go
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/acl"
"github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" diff --git a/command/agent/alloc_endpoint.go b/command/agent/alloc_endpoint.go index 12bc37882c93..ff19e65b2ff4 100644 --- a/command/agent/alloc_endpoint.go +++ b/command/agent/alloc_endpoint.go @@ -17,7 +17,7 @@ import ( "github.com/golang/snappy" "github.com/gorilla/websocket" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" diff --git a/command/agent/alloc_endpoint_test.go b/command/agent/alloc_endpoint_test.go index fa5014dcdead..8af8d154a75f 100644 --- a/command/agent/alloc_endpoint_test.go +++ b/command/agent/alloc_endpoint_test.go @@ -19,7 +19,7 @@ import ( "github.com/golang/snappy" "github.com/gorilla/websocket" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/allocdir" diff --git a/command/agent/command.go b/command/agent/command.go index 7acc63f089f4..4b3f09dbe77d 100644 --- a/command/agent/command.go +++ b/command/agent/command.go @@ -110,7 +110,6 @@ func (c *Command) readConfig() *Config { // Client-only options flags.StringVar(&cmdConfig.Client.StateDir, "state-dir", "", "") flags.StringVar(&cmdConfig.Client.AllocDir, "alloc-dir", "", "") - flags.StringVar(&cmdConfig.Client.AllocMountsDir, "alloc-mounts-dir", "", "") flags.StringVar(&cmdConfig.Client.NodeClass, "node-class", "", "") flags.StringVar(&cmdConfig.Client.NodePool, "node-pool", "", "") flags.StringVar(&servers, "servers", "", "") @@ -383,11 +382,10 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool { // Verify the paths are absolute. dirs := map[string]string{ - "data-dir": config.DataDir, - "plugin-dir": config.PluginDir, - "alloc-dir": config.Client.AllocDir, - "alloc-mounts-dir": config.Client.AllocMountsDir, - "state-dir": config.Client.StateDir, + "data-dir": config.DataDir, + "plugin-dir": config.PluginDir, + "alloc-dir": config.Client.AllocDir, + "state-dir": config.Client.StateDir, } for k, dir := range dirs { if dir == "" { @@ -495,12 +493,8 @@ func (c *Command) IsValidConfig(config, cmdConfig *Config) bool { // The config is valid if the top-level data-dir is set or if both // alloc-dir and state-dir are set. 
 	if config.Client.Enabled && config.DataDir == "" {
-		missing := config.Client.AllocDir == "" ||
-			config.Client.AllocMountsDir == "" ||
-			config.Client.StateDir == "" ||
-			config.PluginDir == ""
-		if missing {
-			c.Ui.Error("Must specify the state, alloc-dir, alloc-mounts-dir and plugin-dir if data-dir is omitted.")
+		if config.Client.AllocDir == "" || config.Client.StateDir == "" || config.PluginDir == "" {
+			c.Ui.Error("Must specify the state, alloc dir, and plugin dir if data-dir is omitted.")
 			return false
 		}
 	}
diff --git a/command/agent/command_test.go b/command/agent/command_test.go
index 014f9a5d4f02..ce6cfed3e20c 100644
--- a/command/agent/command_test.go
+++ b/command/agent/command_test.go
@@ -59,7 +59,7 @@ func TestCommand_Args(t *testing.T) {
 		},
 		{
 			[]string{"-client", "-alloc-dir="},
-			"Must specify the state, alloc-dir, alloc-mounts-dir and plugin-dir if data-dir is omitted.",
+			"Must specify the state, alloc dir, and plugin dir if data-dir is omitted.",
 		},
 		{
 			[]string{"-client", "-data-dir=" + tmpDir, "-meta=invalid..key=inaccessible-value"},
diff --git a/command/agent/config.go b/command/agent/config.go
index 964764ee3946..29eef600afe2 100644
--- a/command/agent/config.go
+++ b/command/agent/config.go
@@ -223,9 +223,6 @@ type ClientConfig struct {
 	// AllocDir is the directory for storing allocation data
 	AllocDir string `hcl:"alloc_dir"`
 
-	// AllocMountsDir is the directory for storing mounts into allocation data
-	AllocMountsDir string `hcl:"alloc_mounts_dir"`
-
 	// Servers is a list of known server addresses. These are as "host:port"
 	Servers []string `hcl:"servers"`
 
@@ -383,9 +380,6 @@ type ClientConfig struct {
 	// Drain specifies whether to drain the client on shutdown; ignored in dev mode.
 	Drain *config.DrainConfig `hcl:"drain_on_shutdown"`
 
-	// Users is used to configure parameters around operating system users.
-	Users *config.UsersConfig `hcl:"users"`
-
 	// ExtraKeysHCL is used by hcl to surface unexpected keys
 	ExtraKeysHCL []string `hcl:",unusedKeys" json:"-"`
 }
@@ -409,7 +403,6 @@ func (c *ClientConfig) Copy() *ClientConfig {
 	nc.NomadServiceDiscovery = pointer.Copy(c.NomadServiceDiscovery)
 	nc.Artifact = c.Artifact.Copy()
 	nc.Drain = c.Drain.Copy()
-	nc.Users = c.Users.Copy()
 	nc.ExtraKeysHCL = slices.Clone(c.ExtraKeysHCL)
 	return &nc
 }
@@ -1394,7 +1387,6 @@ func DefaultConfig() *Config {
 			NomadServiceDiscovery: pointer.Of(true),
 			Artifact:              config.DefaultArtifactConfig(),
 			Drain:                 nil,
-			Users:                 config.DefaultUsersConfig(),
 		},
 		Server: &ServerConfig{
 			Enabled: false,
@@ -2249,9 +2241,6 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 	if b.AllocDir != "" {
 		result.AllocDir = b.AllocDir
 	}
-	if b.AllocMountsDir != "" {
-		result.AllocMountsDir = b.AllocMountsDir
-	}
 	if b.NodeClass != "" {
 		result.NodeClass = b.NodeClass
 	}
@@ -2407,7 +2396,6 @@ func (a *ClientConfig) Merge(b *ClientConfig) *ClientConfig {
 
 	result.Artifact = a.Artifact.Merge(b.Artifact)
 	result.Drain = a.Drain.Merge(b.Drain)
-	result.Users = a.Users.Merge(b.Users)
 
 	return &result
 }
diff --git a/command/agent/config_parse_test.go b/command/agent/config_parse_test.go
index 0a128c0cefbf..3854eed378d6 100644
--- a/command/agent/config_parse_test.go
+++ b/command/agent/config_parse_test.go
@@ -44,12 +44,11 @@ var basicConfig = &Config{
 		Serf: "127.0.0.4",
 	},
 	Client: &ClientConfig{
-		Enabled:        true,
-		StateDir:       "/tmp/client-state",
-		AllocDir:       "/tmp/alloc",
-		AllocMountsDir: "/tmp/mounts",
-		Servers:        []string{"a.b.c:80", "127.0.0.1:1234"},
-		NodeClass:      "linux-medium-64bit",
+		Enabled:   true,
+		StateDir:  "/tmp/client-state",
+		AllocDir:  "/tmp/alloc",
+		Servers:   []string{"a.b.c:80", "127.0.0.1:1234"},
+		NodeClass: "linux-medium-64bit",
 		ServerJoin: &ServerJoin{
 			RetryJoin:     []string{"1.1.1.1", "2.2.2.2"},
 			RetryInterval: time.Duration(15) * time.Second,
diff --git a/command/agent/consul/int_test.go b/command/agent/consul/int_test.go
index 4ec6e34d7afb..df6266af8ac5 100644
--- a/command/agent/consul/int_test.go
+++ b/command/agent/consul/int_test.go
@@ -131,7 +131,7 @@ func TestConsul_Integration(t *testing.T) {
 	logger := testlog.HCLogger(t)
 	logUpdate := &mockUpdater{logger}
-	allocDir := allocdir.NewAllocDir(logger, conf.AllocDir, conf.AllocMountsDir, alloc.ID)
+	allocDir := allocdir.NewAllocDir(logger, conf.AllocDir, alloc.ID)
 	if err := allocDir.Build(); err != nil {
 		t.Fatalf("error building alloc dir: %v", err)
 	}
diff --git a/command/agent/event_endpoint.go b/command/agent/event_endpoint.go
index 83aca538affe..8c5483ca3e0d 100644
--- a/command/agent/event_endpoint.go
+++ b/command/agent/event_endpoint.go
@@ -15,7 +15,7 @@ import (
 	"strings"
 
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"golang.org/x/sync/errgroup"
 )
diff --git a/command/agent/fs_endpoint.go b/command/agent/fs_endpoint.go
index b3f3acaa426f..910852bca275 100644
--- a/command/agent/fs_endpoint.go
+++ b/command/agent/fs_endpoint.go
@@ -14,7 +14,7 @@ import (
 	"strings"
 
 	"github.com/docker/docker/pkg/ioutils"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/nomad/structs"
 )
diff --git a/command/agent/helpers_test.go b/command/agent/helpers_test.go
index 7620b0b6b31a..114e17a9655a 100644
--- a/command/agent/helpers_test.go
+++ b/command/agent/helpers_test.go
@@ -8,12 +8,12 @@ import (
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/uuid"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestHTTP_rpcHandlerForAlloc(t *testing.T) {
 	ci.Parallel(t)
-
+	require := require.New(t)
 	agent := NewTestAgent(t, t.Name(), nil)
 	defer agent.Shutdown()
 
@@ -23,25 +23,25 @@ func TestHTTP_rpcHandlerForAlloc(t *testing.T) {
 	// Case 1: Client has allocation
 	// Outcome: Use local client
 	lc, rc, s := agent.Server.rpcHandlerForAlloc(a.ID)
-	must.True(t, lc)
-	must.False(t, rc)
-	must.False(t, s)
+	require.True(lc)
+	require.False(rc)
+	require.False(s)
 
 	// Case 2: Client doesn't have allocation and there is a server
 	// Outcome: Use server
 	lc, rc, s = agent.Server.rpcHandlerForAlloc(uuid.Generate())
-	must.False(t, lc)
-	must.False(t, rc)
-	must.True(t, s)
+	require.False(lc)
+	require.False(rc)
+	require.True(s)
 
 	// Case 3: Client doesn't have allocation and there is no server
 	// Outcome: Use client RPC to server
 	srv := agent.server
 	agent.server = nil
 	lc, rc, s = agent.Server.rpcHandlerForAlloc(uuid.Generate())
-	must.False(t, lc)
-	must.True(t, rc)
-	must.False(t, s)
+	require.False(lc)
+	require.True(rc)
+	require.False(s)
 	agent.server = srv
 
 	// Case 4: No client
@@ -49,15 +49,15 @@ func TestHTTP_rpcHandlerForAlloc(t *testing.T) {
 	client := agent.client
 	agent.client = nil
 	lc, rc, s = agent.Server.rpcHandlerForAlloc(uuid.Generate())
-	must.False(t, lc)
-	must.False(t, rc)
-	must.True(t, s)
+	require.False(lc)
+	require.False(rc)
+	require.True(s)
 	agent.client = client
 }
 
 func TestHTTP_rpcHandlerForNode(t *testing.T) {
 	ci.Parallel(t)
-
+	require := require.New(t)
 	agent := NewTestAgent(t, t.Name(), nil)
 	defer agent.Shutdown()
 
@@ -66,25 +66,25 @@ func TestHTTP_rpcHandlerForNode(t *testing.T) {
 	// Case 1: Node running, no node ID given
 	// Outcome: Use local node
 	lc, rc, s := agent.Server.rpcHandlerForNode("")
-	must.True(t, lc)
-	must.False(t, rc)
-	must.False(t, s)
+	require.True(lc)
+	require.False(rc)
+	require.False(s)
 
 	// Case 2: Node running, it's ID given
 	// Outcome: Use local node
 	lc, rc, s = agent.Server.rpcHandlerForNode(cID)
-	must.True(t, lc)
-	must.False(t, rc)
-	must.False(t, s)
+	require.True(lc)
+	require.False(rc)
+	require.False(s)
 
 	// Case 3: Local node but wrong ID and there is no server
 	// Outcome: Use client RPC to server
 	srv := agent.server
 	agent.server = nil
 	lc, rc, s = agent.Server.rpcHandlerForNode(uuid.Generate())
-	must.False(t, lc)
-	must.True(t, rc)
-	must.False(t, s)
+	require.False(lc)
+	require.True(rc)
+	require.False(s)
 	agent.server = srv
 
 	// Case 4: No client
@@ -92,8 +92,8 @@ func TestHTTP_rpcHandlerForNode(t *testing.T) {
 	client := agent.client
 	agent.client = nil
 	lc, rc, s = agent.Server.rpcHandlerForNode(uuid.Generate())
-	must.False(t, lc)
-	must.False(t, rc)
-	must.True(t, s)
+	require.False(lc)
+	require.False(rc)
+	require.True(s)
 	agent.client = client
 }
diff --git a/command/agent/host/host_test.go b/command/agent/host/host_test.go
index 8c9e2e8e6197..8b685a152c85 100644
--- a/command/agent/host/host_test.go
+++ b/command/agent/host/host_test.go
@@ -6,17 +6,17 @@ package host
 import (
 	"testing"
 
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestHostUtils(t *testing.T) {
 	mounts := mountedPaths()
-	must.SliceNotEmpty(t, mounts)
+	require.NotEmpty(t, mounts)
 
 	du, err := diskUsage("/")
-	must.NoError(t, err)
-	must.Positive(t, du.DiskMB)
-	must.Positive(t, du.UsedMB)
+	require.NoError(t, err)
+	require.NotZero(t, du.DiskMB)
+	require.NotZero(t, du.UsedMB)
 }
 
 func TestMakeHostData(t *testing.T) {
@@ -27,15 +27,15 @@ func TestMakeHostData(t *testing.T) {
 	t.Setenv("ryanSECRETS", "foo")
 
 	host, err := MakeHostData()
-	must.NoError(t, err)
-	must.NotEq(t, "", host.OS)
-	must.SliceNotEmpty(t, host.Network)
-	must.NotEq(t, "", host.ResolvConf)
-	must.NotEq(t, "", host.Hosts)
-	must.MapNotEmpty(t, host.Disk)
-	must.MapNotEmpty(t, host.Environment)
-	must.Eq(t, "", host.Environment["VAULT_TOKEN"])
-	must.Eq(t, "", host.Environment["BOGUS_TOKEN"])
-	must.Eq(t, "", host.Environment["BOGUS_SECRET"])
-	must.Eq(t, "", host.Environment["ryanSECRETS"])
+	require.NoError(t, err)
+	require.NotEmpty(t, host.OS)
+	require.NotEmpty(t, host.Network)
+	require.NotEmpty(t, host.ResolvConf)
+	require.NotEmpty(t, host.Hosts)
+	require.NotEmpty(t, host.Disk)
+	require.NotEmpty(t, host.Environment)
+	require.Equal(t, "", host.Environment["VAULT_TOKEN"])
+	require.Equal(t, "", host.Environment["BOGUS_TOKEN"])
+	require.Equal(t, "", host.Environment["BOGUS_SECRET"])
+	require.Equal(t, "", host.Environment["ryanSECRETS"])
 }
diff --git a/command/agent/http.go b/command/agent/http.go
index c08d4fdf5104..35e3ccd557ee 100644
--- a/command/agent/http.go
+++ b/command/agent/http.go
@@ -25,7 +25,7 @@ import (
 	"github.com/gorilla/websocket"
 	"github.com/hashicorp/go-connlimit"
 	log "github.com/hashicorp/go-hclog"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	multierror "github.com/hashicorp/go-multierror"
 	"github.com/rs/cors"
 	"golang.org/x/time/rate"
diff --git a/command/agent/http_test.go b/command/agent/http_test.go
index 19b70d30546b..528c08f6d840 100644
--- a/command/agent/http_test.go
+++ b/command/agent/http_test.go
@@ -22,7 +22,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/acl"
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/ci"
diff --git a/command/agent/job_endpoint.go b/command/agent/job_endpoint.go
index ef427a03e9e5..73f46f6b9202 100644
--- a/command/agent/job_endpoint.go
+++ b/command/agent/job_endpoint.go
@@ -1164,21 +1164,6 @@ func ApiTgToStructsTG(job *structs.Job, taskGroup *api.TaskGroup, tg *structs.Ta
 		}
 	}
 
-	if taskGroup.Disconnect != nil {
-		tg.Disconnect = &structs.DisconnectStrategy{
-			StopOnClientAfter: taskGroup.Disconnect.StopOnClientAfter,
-			Replace:           taskGroup.Disconnect.Replace,
-		}
-
-		if taskGroup.Disconnect.Reconcile != nil {
-			tg.Disconnect.Reconcile = *taskGroup.Disconnect.Reconcile
-		}
-
-		if taskGroup.Disconnect.LostAfter != nil {
-			tg.Disconnect.LostAfter = *taskGroup.Disconnect.LostAfter
-		}
-	}
-
 	if taskGroup.Migrate != nil {
 		tg.Migrate = &structs.MigrateStrategy{
 			MaxParallel: *taskGroup.Migrate.MaxParallel,
@@ -1329,7 +1314,6 @@ func ApiTaskToStructsTask(job *structs.Job, group *structs.TaskGroup,
 				Destination:     *mount.Destination,
 				ReadOnly:        *mount.ReadOnly,
 				PropagationMode: *mount.PropagationMode,
-				SELinuxLabel:    *mount.SELinuxLabel,
 			})
 		}
 	}
@@ -1355,12 +1339,11 @@ func ApiTaskToStructsTask(job *structs.Job, group *structs.TaskGroup,
 		for _, ta := range apiTask.Artifacts {
 			structsTask.Artifacts = append(structsTask.Artifacts,
 				&structs.TaskArtifact{
-					GetterSource:   *ta.GetterSource,
-					GetterOptions:  maps.Clone(ta.GetterOptions),
-					GetterHeaders:  maps.Clone(ta.GetterHeaders),
-					GetterMode:     *ta.GetterMode,
-					GetterInsecure: *ta.GetterInsecure,
-					RelativeDest:   *ta.RelativeDest,
+					GetterSource:  *ta.GetterSource,
+					GetterOptions: maps.Clone(ta.GetterOptions),
+					GetterHeaders: maps.Clone(ta.GetterHeaders),
+					GetterMode:    *ta.GetterMode,
+					RelativeDest:  *ta.RelativeDest,
 				})
 		}
 	}
diff --git a/command/agent/job_endpoint_test.go b/command/agent/job_endpoint_test.go
index fe0e08217d57..cac997fd4903 100644
--- a/command/agent/job_endpoint_test.go
+++ b/command/agent/job_endpoint_test.go
@@ -2752,9 +2752,6 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
 					},
 				},
 			},
-			Disconnect: &api.DisconnectStrategy{
-				LostAfter: pointer.Of(30 * time.Second),
-			},
 			MaxClientDisconnect: pointer.Of(30 * time.Second),
 			Tasks: []*api.Task{
 				{
@@ -3188,11 +3185,6 @@ func TestJobs_ApiJobToStructsJob(t *testing.T) {
 					},
 				},
 			},
-			Disconnect: &structs.DisconnectStrategy{
-				LostAfter: 30 * time.Second,
-				Replace:   pointer.Of(true),
-				Reconcile: structs.ReconcileOptionBestScore,
-			},
 			MaxClientDisconnect: pointer.Of(30 * time.Second),
 			Tasks: []*structs.Task{
 				{
diff --git a/command/agent/operator_endpoint.go b/command/agent/operator_endpoint.go
index 5140bd28fe21..4634bf0d8c04 100644
--- a/command/agent/operator_endpoint.go
+++ b/command/agent/operator_endpoint.go
@@ -13,7 +13,7 @@ import (
 	"strings"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/raft"
 
 	"github.com/hashicorp/nomad/api"
diff --git a/command/agent/testdata/basic.hcl b/command/agent/testdata/basic.hcl
index 4ee1f70d7708..f9d88d0a3799 100644
--- a/command/agent/testdata/basic.hcl
+++ b/command/agent/testdata/basic.hcl
@@ -42,12 +42,11 @@ advertise {
 }
 
 client {
-  enabled          = true
-  state_dir        = "/tmp/client-state"
-  alloc_dir        = "/tmp/alloc"
-  alloc_mounts_dir = "/tmp/mounts"
-  servers          = ["a.b.c:80", "127.0.0.1:1234"]
-  node_class       = "linux-medium-64bit"
+  enabled    = true
+  state_dir  = "/tmp/client-state"
+  alloc_dir  = "/tmp/alloc"
+  servers    = ["a.b.c:80", "127.0.0.1:1234"]
+  node_class = "linux-medium-64bit"
 
   meta {
     foo = "bar"
diff --git a/command/agent/testdata/basic.json b/command/agent/testdata/basic.json
index bc29b1897190..4804e6a9c790 100644
--- a/command/agent/testdata/basic.json
+++ b/command/agent/testdata/basic.json
@@ -73,7 +73,6 @@
   "client": [
     {
       "alloc_dir": "/tmp/alloc",
-      "alloc_mounts_dir": "/tmp/mounts",
       "bridge_network_name": "custom_bridge_name",
      "bridge_network_subnet": "custom_bridge_subnet",
       "chroot_env": [
diff --git a/command/commands.go b/command/commands.go
index 0fd77251e231..11887d1cca48 100644
--- a/command/commands.go
+++ b/command/commands.go
@@ -695,11 +695,6 @@ func Commands(metaPtr *Meta, agentUi cli.Ui) map[string]cli.CommandFactory {
 				Meta: meta,
 			}, nil
 		},
-		"operator autopilot health": func() (cli.Command, error) {
-			return &OperatorAutopilotHealthCommand{
-				Meta: meta,
-			}, nil
-		},
 		"operator client-state": func() (cli.Command, error) {
 			return &OperatorClientStateCommand{
diff --git a/command/data_format.go b/command/data_format.go
index a3bf614d9113..a445538ef8db 100644
--- a/command/data_format.go
+++ b/command/data_format.go
@@ -9,7 +9,7 @@ import (
 	"text/template"
 
 	"github.com/Masterminds/sprig/v3"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 )
 
 var (
diff --git a/command/deployment_fail_test.go b/command/deployment_fail_test.go
index c6d3c72330d2..b1e2757d88b6 100644
--- a/command/deployment_fail_test.go
+++ b/command/deployment_fail_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDeploymentFailCommand_Implements(t *testing.T) {
@@ -44,6 +44,7 @@ func TestDeploymentFailCommand_Fails(t *testing.T) {
 
 func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -54,13 +55,13 @@ func TestDeploymentFailCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.Nil(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/deployment_pause_test.go b/command/deployment_pause_test.go
index f8356af7caca..8eea4bad4a4c 100644
--- a/command/deployment_pause_test.go
+++ b/command/deployment_pause_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDeploymentPauseCommand_Implements(t *testing.T) {
@@ -44,6 +44,7 @@ func TestDeploymentPauseCommand_Fails(t *testing.T) {
 
 func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -54,13 +55,13 @@ func TestDeploymentPauseCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.NoError(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/deployment_promote_test.go b/command/deployment_promote_test.go
index 38aefcddb9e8..5e84f681f4d5 100644
--- a/command/deployment_promote_test.go
+++ b/command/deployment_promote_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDeploymentPromoteCommand_Implements(t *testing.T) {
@@ -44,6 +44,7 @@ func TestDeploymentPromoteCommand_Fails(t *testing.T) {
 
 func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -54,13 +55,13 @@ func TestDeploymentPromoteCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.NoError(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/deployment_resume_test.go b/command/deployment_resume_test.go
index ee4f97c1922e..ddc286559357 100644
--- a/command/deployment_resume_test.go
+++ b/command/deployment_resume_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDeploymentResumeCommand_Implements(t *testing.T) {
@@ -44,6 +44,7 @@ func TestDeploymentResumeCommand_Fails(t *testing.T) {
 
 func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -54,13 +55,13 @@ func TestDeploymentResumeCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.NoError(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/deployment_status_test.go b/command/deployment_status_test.go
index 62e86428203e..38377d741941 100644
--- a/command/deployment_status_test.go
+++ b/command/deployment_status_test.go
@@ -10,7 +10,8 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestDeploymentStatusCommand_Implements(t *testing.T) {
@@ -25,36 +26,37 @@ func TestDeploymentStatusCommand_Fails(t *testing.T) {
 
 	// Fails on misuse
 	code := cmd.Run([]string{"some", "bad", "args"})
-	must.One(t, code)
+	require.Equal(t, 1, code)
 
 	out := ui.ErrorWriter.String()
-	must.StrContains(t, out, commandErrorText(cmd))
+	require.Contains(t, out, commandErrorText(cmd))
 	ui.ErrorWriter.Reset()
 
 	code = cmd.Run([]string{"-address=nope", "12"})
-	must.One(t, code)
+	require.Equal(t, 1, code)
 
 	out = ui.ErrorWriter.String()
-	must.StrContains(t, out, "Error retrieving deployment")
+	require.Contains(t, out, "Error retrieving deployment")
 	ui.ErrorWriter.Reset()
 
 	code = cmd.Run([]string{"-address=nope"})
-	must.One(t, code)
+	require.Equal(t, 1, code)
 
 	out = ui.ErrorWriter.String()
 	// "deployments" indicates that we attempted to list all deployments
-	must.StrContains(t, out, "Error retrieving deployments")
+	require.Contains(t, out, "Error retrieving deployments")
 	ui.ErrorWriter.Reset()
 
 	// Fails if monitor passed with json or tmpl flags
 	for _, flag := range []string{"-json", "-t"} {
 		code = cmd.Run([]string{"-monitor", flag, "12"})
-		must.One(t, code)
+		require.Equal(t, 1, code)
 
 		out = ui.ErrorWriter.String()
-		must.StrContains(t, out, "The monitor flag cannot be used with the '-json' or '-t' flags")
+		require.Contains(t, out, "The monitor flag cannot be used with the '-json' or '-t' flags")
 		ui.ErrorWriter.Reset()
 	}
 }
 
 func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -65,13 +67,13 @@ func TestDeploymentStatusCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.NoError(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/deployment_unblock_test.go b/command/deployment_unblock_test.go
index 96430fc71f5c..b2b9110b5f7b 100644
--- a/command/deployment_unblock_test.go
+++ b/command/deployment_unblock_test.go
@@ -11,7 +11,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestDeploymentUnblockCommand_Implements(t *testing.T) {
@@ -44,6 +44,7 @@ func TestDeploymentUnblockCommand_Fails(t *testing.T) {
 
 func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -54,13 +55,13 @@ func TestDeploymentUnblockCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake deployment
 	state := srv.Agent.Server().State()
 	d := mock.Deployment()
-	must.NoError(t, state.UpsertDeployment(1000, d))
+	assert.Nil(state.UpsertDeployment(1000, d))
 
 	prefix := d.ID[:5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, d.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(d.ID, res[0])
 }
diff --git a/command/eval_delete_test.go b/command/eval_delete_test.go
index 9ada56f5091b..c6e907207002 100644
--- a/command/eval_delete_test.go
+++ b/command/eval_delete_test.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/hashicorp/nomad/ci"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestEvalDeleteCommand_Run(t *testing.T) {
@@ -36,30 +36,30 @@ func TestEvalDeleteCommand_Run(t *testing.T) {
 				}
 
 				// Test basic command input validation.
-				must.One(t, cmd.Run([]string{"-address=" + url}))
-				must.StrContains(t, ui.ErrorWriter.String(), "Error validating command args and flags")
+				require.Equal(t, 1, cmd.Run([]string{"-address=" + url}))
+				require.Contains(t, ui.ErrorWriter.String(), "Error validating command args and flags")
 
 				ui.ErrorWriter.Reset()
 				ui.OutputWriter.Reset()
 
 				// Try running the command when the eval broker is not paused.
-				must.One(t, cmd.Run([]string{"-address=" + url, "fa3a8c37-eac3-00c7-3410-5ba3f7318fd8"}))
-				must.StrContains(t, ui.ErrorWriter.String(), "Eval broker is not paused")
+				require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "fa3a8c37-eac3-00c7-3410-5ba3f7318fd8"}))
+				require.Contains(t, ui.ErrorWriter.String(), "Eval broker is not paused")
 
 				ui.ErrorWriter.Reset()
 				ui.OutputWriter.Reset()
 
 				// Paused the eval broker, then try deleting with an eval that
 				// does not exist.
 				schedulerConfig, _, err := client.Operator().SchedulerGetConfiguration(nil)
-				must.NoError(t, err)
-				must.False(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
+				require.NoError(t, err)
+				require.False(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
 
 				schedulerConfig.SchedulerConfig.PauseEvalBroker = true
 				_, _, err = client.Operator().SchedulerSetConfiguration(schedulerConfig.SchedulerConfig, nil)
-				must.NoError(t, err)
-				must.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
+				require.NoError(t, err)
+				require.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker)
 
-				must.One(t, cmd.Run([]string{"-address=" + url, "fa3a8c37-eac3-00c7-3410-5ba3f7318fd8"}))
-				must.StrContains(t, ui.ErrorWriter.String(), "eval not found")
+				require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "fa3a8c37-eac3-00c7-3410-5ba3f7318fd8"}))
+				require.Contains(t, ui.ErrorWriter.String(), "eval not found")
 
 				ui.ErrorWriter.Reset()
 				ui.OutputWriter.Reset()
 			},
@@ -82,13 +82,13 @@ func TestEvalDeleteCommand_Run(t *testing.T) {
 
 				// Paused the eval broker.
schedulerConfig, _, err := client.Operator().SchedulerGetConfiguration(nil) - must.NoError(t, err) - must.False(t, schedulerConfig.SchedulerConfig.PauseEvalBroker) + require.NoError(t, err) + require.False(t, schedulerConfig.SchedulerConfig.PauseEvalBroker) schedulerConfig.SchedulerConfig.PauseEvalBroker = true _, _, err = client.Operator().SchedulerSetConfiguration(schedulerConfig.SchedulerConfig, nil) - must.NoError(t, err) - must.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker) + require.NoError(t, err) + require.True(t, schedulerConfig.SchedulerConfig.PauseEvalBroker) // With the eval broker paused, run a job register several times // to generate evals that will not be acted on. @@ -97,20 +97,20 @@ func TestEvalDeleteCommand_Run(t *testing.T) { evalIDs := make([]string, 3) for i := 0; i < 3; i++ { regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) - must.NotNil(t, regResp) - must.NotEq(t, "", regResp.EvalID) + require.NoError(t, err) + require.NotNil(t, regResp) + require.NotEmpty(t, regResp.EvalID) evalIDs[i] = regResp.EvalID } // Ensure we have three evaluations in state. evalList, _, err := client.Evaluations().List(nil) - must.NoError(t, err) - must.SliceLen(t, 3, evalList) + require.NoError(t, err) + require.Len(t, evalList, 3) // Attempted to delete one eval using the ID. - must.Zero(t, cmd.Run([]string{"-address=" + url, evalIDs[0]})) - must.StrContains(t, ui.OutputWriter.String(), "Successfully deleted 1 evaluation") + require.Equal(t, 0, cmd.Run([]string{"-address=" + url, evalIDs[0]})) + require.Contains(t, ui.OutputWriter.String(), "Successfully deleted 1 evaluation") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() @@ -122,15 +122,15 @@ func TestEvalDeleteCommand_Run(t *testing.T) { // Attempted to delete the remaining two evals using a filter // expression. expr := fmt.Sprintf("JobID == %q and Status == \"pending\" ", *testJob.Name) - must.Zero(t, cmd.Run([]string{"-address=" + url, "-filter=" + expr})) - must.StrContains(t, ui.OutputWriter.String(), "Successfully deleted 2 evaluations") + require.Equal(t, 0, cmd.Run([]string{"-address=" + url, "-filter=" + expr})) + require.Contains(t, ui.OutputWriter.String(), "Successfully deleted 2 evaluations") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Ensure we have zero evaluations in state. 
evalList, _, err = client.Evaluations().List(nil) - must.NoError(t, err) - must.SliceEmpty(t, evalList) + require.NoError(t, err) + require.Len(t, evalList, 0) }, name: "successful", }, @@ -181,7 +181,7 @@ func TestEvalDeleteCommand_verifyArgsAndFlags(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualError := tc.inputEvalDeleteCommand.verifyArgsAndFlags(tc.inputArgs) - must.Eq(t, tc.expectedError, actualError) + require.Equal(t, tc.expectedError, actualError) }) } } diff --git a/command/eval_list_test.go b/command/eval_list_test.go index a359aeac1d6b..fbbf3a52e766 100644 --- a/command/eval_list_test.go +++ b/command/eval_list_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestEvalList_ArgsWithoutPageToken(t *testing.T) { @@ -58,6 +58,8 @@ func TestEvalList_ArgsWithoutPageToken(t *testing.T) { for _, tc := range cases { args := strings.Split(tc.cli, " ") - must.Eq(t, tc.expected, argsWithoutPageToken(args), must.Sprintf("for input: %s", tc.cli)) + assert.Equal(t, tc.expected, argsWithoutPageToken(args), + "for input: %s", tc.cli) } + } diff --git a/command/eval_status_test.go b/command/eval_status_test.go index fa8f393c0476..9cab0b692d7d 100644 --- a/command/eval_status_test.go +++ b/command/eval_status_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestEvalStatusCommand_Implements(t *testing.T) { @@ -68,6 +68,7 @@ func TestEvalStatusCommand_Fails(t *testing.T) { func TestEvalStatusCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -78,13 +79,13 @@ func TestEvalStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake eval state := srv.Agent.Server().State() e := mock.Eval() - must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{e})) + assert.Nil(state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{e})) prefix := e.ID[:5] args := complete.Args{Last: prefix} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.SliceLen(t, 1, res) - must.Eq(t, e.ID, res[0]) + assert.Equal(1, len(res)) + assert.Equal(e.ID, res[0]) } diff --git a/command/event_test.go b/command/event_test.go index 6175e41e91e4..40d2b96e857d 100644 --- a/command/event_test.go +++ b/command/event_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestEventCommand_BaseCommand(t *testing.T) { @@ -22,5 +22,5 @@ func TestEventCommand_BaseCommand(t *testing.T) { code := cmd.Run([]string{"-address=" + url}) - must.Eq(t, -18511, code) + require.Equal(t, -18511, code) } diff --git a/command/fmt_test.go b/command/fmt_test.go index a2cf0f028fac..873d90d83df6 100644 --- a/command/fmt_test.go +++ b/command/fmt_test.go @@ -13,6 +13,8 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestFmtCommand(t *testing.T) { @@ -139,10 +141,10 @@ func TestFmtCommand_FromWorkingDirectory(t *testing.T) { tmpDir := fmtFixtureWriteDir(t) cwd, err := os.Getwd() - must.NoError(t, err) + require.NoError(t, err) err = 
-	must.NoError(t, err)
+	require.NoError(t, err)
 	defer os.Chdir(cwd)
 
 	tests := []struct {
@@ -249,7 +251,7 @@ func TestFmtCommand_FileDoesNotExist(t *testing.T) {
 	}
 
 	code := cmd.Run([]string{"file/does/not/exist.hcl"})
-	must.One(t, code)
+	assert.Equal(t, 1, code)
 }
 
 func TestFmtCommand_InvalidSyntax(t *testing.T) {
@@ -264,14 +266,14 @@ func TestFmtCommand_InvalidSyntax(t *testing.T) {
 	}
 
 	code := cmd.Run([]string{"-"})
-	must.One(t, code)
+	assert.Equal(t, 1, code)
 }
 
 func fmtFixtureWriteDir(t *testing.T) string {
 	dir := t.TempDir()
 
 	err := os.WriteFile(filepath.Join(dir, fmtFixture.filename), fmtFixture.input, 0644)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	return dir
 }
diff --git a/command/helper_devices_test.go b/command/helper_devices_test.go
index 9c07ef2c37e1..217fdde2ee03 100644
--- a/command/helper_devices_test.go
+++ b/command/helper_devices_test.go
@@ -9,15 +9,18 @@ import (
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/pointer"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestDeviceQualifiedID(t *testing.T) {
 	ci.Parallel(t)
 
-	must.Eq(t, "vendor/type/name[id]", deviceQualifiedID("vendor", "type", "name", "id"))
-	must.Eq(t, "vendor/type[id]", deviceQualifiedID("vendor", "type", "", "id"))
-	must.Eq(t, "vendor[id]", deviceQualifiedID("vendor", "", "", "id"))
+	require := require.New(t)
+
+	require.Equal("vendor/type/name[id]", deviceQualifiedID("vendor", "type", "name", "id"))
+	require.Equal("vendor/type[id]", deviceQualifiedID("vendor", "type", "", "id"))
+	require.Equal("vendor[id]", deviceQualifiedID("vendor", "", "", "id"))
 }
 
 func TestBuildDeviceStatsSummaryMap(t *testing.T) {
@@ -74,9 +77,7 @@ func TestBuildDeviceStatsSummaryMap(t *testing.T) {
 		},
 	}
 
-	must.Eq(t, expected, buildDeviceStatsSummaryMap(hostDeviceStats))
-	// TODO(shoenig) figure out why the below does not compile
-	// must.MapContainsValues[map[string]*api.StatValue](t, expected, buildDeviceStatsSummaryMap(hostDeviceStats))
+	require.EqualValues(t, expected, buildDeviceStatsSummaryMap(hostDeviceStats))
 }
 
 func TestFormatDeviceStats(t *testing.T) {
@@ -119,7 +120,7 @@ func TestFormatDeviceStats(t *testing.T) {
 	result := formatDeviceStats("TestDeviceID", stat)
 
 	// check that device id always appears first
-	must.Eq(t, "Device|TestDeviceID", result[0])
+	require.Equal(t, "Device|TestDeviceID", result[0])
 
 	// check rest of values
 	expected := []string{
@@ -133,7 +134,7 @@ func TestFormatDeviceStats(t *testing.T) {
 		"nested2.k2|v2",
 	}
 
-	must.Eq(t, expected, result)
+	require.Equal(t, expected, result)
 }
 
 func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) {
@@ -207,7 +208,7 @@ func TestNodeStatusCommand_GetDeviceResourcesForNode(t *testing.T) {
 		"vendor2/type2[id2]|4",
 	}
 
-	must.Eq(t, expected, formattedDevices)
+	assert.Equal(t, expected, formattedDevices)
 }
 
 func TestNodeStatusCommand_GetDeviceResources(t *testing.T) {
@@ -257,7 +258,7 @@ func TestNodeStatusCommand_GetDeviceResources(t *testing.T) {
 		"vendor2/type2[id2]|4",
 	}
 
-	must.Eq(t, expected, formattedDevices)
+	assert.Equal(t, expected, formattedDevices)
 }
 
 func TestGetDeviceAttributes(t *testing.T) {
 	ci.Parallel(t)
@@ -285,5 +286,5 @@ func TestGetDeviceAttributes(t *testing.T) {
 		"utilization|0.78 %",
 	}
 
-	must.Eq(t, expected, formattedDevices)
+	assert.Equal(t, expected, formattedDevices)
 }
diff --git a/command/helpers_test.go b/command/helpers_test.go
index 25402b924327..25fb5b3b2695 100644
--- a/command/helpers_test.go
+++ b/command/helpers_test.go
@@ -23,6 +23,7 @@ import (
 	"github.com/kr/pretty"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestHelpers_FormatKV(t *testing.T) {
@@ -314,22 +315,22 @@ func TestJobGetter_LocalFile_InvalidHCL2(t *testing.T) {
 	for _, c := range cases {
 		t.Run(c.name, func(t *testing.T) {
 			fh, err := os.CreateTemp("", "nomad")
-			must.NoError(t, err)
+			require.NoError(t, err)
 			defer os.Remove(fh.Name())
 			defer fh.Close()
 
 			_, err = fh.WriteString(c.hcl)
-			must.NoError(t, err)
+			require.NoError(t, err)
 
 			j := &JobGetter{}
 			_, _, err = j.ApiJob(fh.Name())
-			must.Error(t, err)
+			require.Error(t, err)
 
 			exptMessage := "Failed to parse using HCL 2. Use the HCL 1"
 			if c.expectHCL1Message {
-				must.ErrorContains(t, err, exptMessage)
+				require.Contains(t, err.Error(), exptMessage)
 			} else {
-				must.StrNotContains(t, err.Error(), exptMessage)
+				require.NotContains(t, err.Error(), exptMessage)
 			}
 		})
 	}
@@ -358,20 +359,20 @@ job "example" {
 	expected := []string{"default-val", "from-cli", "from-varfile", "from-envvar"}
 
 	hclf, err := os.CreateTemp("", "hcl")
-	must.NoError(t, err)
+	require.NoError(t, err)
 	defer os.Remove(hclf.Name())
 	defer hclf.Close()
 
 	_, err = hclf.WriteString(hcl)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	vf, err := os.CreateTemp("", "var.hcl")
-	must.NoError(t, err)
+	require.NoError(t, err)
 	defer os.Remove(vf.Name())
 	defer vf.Close()
 
 	_, err = vf.WriteString(fileVars + "\n")
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	jg := &JobGetter{
 		Vars: cliArgs,
@@ -380,10 +381,10 @@ job "example" {
 	}
 
 	_, j, err := jg.Get(hclf.Name())
-	must.NoError(t, err)
+	require.NoError(t, err)
 
-	must.NotNil(t, j)
-	must.Eq(t, expected, j.Datacenters)
+	require.NotNil(t, j)
+	require.Equal(t, expected, j.Datacenters)
 }
 
 func TestJobGetter_HCL2_Variables_StrictFalse(t *testing.T) {
@@ -413,20 +414,20 @@ unsedVar2 = "from-varfile"
 	expected := []string{"default-val", "from-cli", "from-varfile", "from-envvar"}
 
 	hclf, err := os.CreateTemp("", "hcl")
-	must.NoError(t, err)
+	require.NoError(t, err)
 	defer os.Remove(hclf.Name())
 	defer hclf.Close()
 
 	_, err = hclf.WriteString(hcl)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	vf, err := os.CreateTemp("", "var.hcl")
-	must.NoError(t, err)
+	require.NoError(t, err)
 	defer os.Remove(vf.Name())
 	defer vf.Close()
 
 	_, err = vf.WriteString(fileVars + "\n")
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	jg := &JobGetter{
 		Vars: cliArgs,
@@ -435,9 +436,10 @@ unsedVar2 = "from-varfile"
 	}
 
 	_, j, err := jg.Get(hclf.Name())
-	must.NoError(t, err)
-	must.NotNil(t, j)
-	must.Eq(t, expected, j.Datacenters)
+	require.NoError(t, err)
+
+	require.NotNil(t, j)
+	require.Equal(t, expected, j.Datacenters)
 }
 
 // Test StructJob with jobfile from HTTP Server
@@ -533,9 +535,9 @@ func TestJobGetter_Validate(t *testing.T) {
 
 			switch tc.errContains {
 			case "":
-				must.NoError(t, err)
+				require.NoError(t, err)
 			default:
-				must.ErrorContains(t, err, tc.errContains)
+				require.ErrorContains(t, err, tc.errContains)
 			}
 
 		})
@@ -608,27 +610,27 @@ func TestUiErrorWriter(t *testing.T) {
 	partialAcc := ""
 	for _, in := range inputs {
 		n, err := w.Write([]byte(in))
-		must.NoError(t, err)
-		must.Eq(t, len(in), n)
+		require.NoError(t, err)
+		require.Equal(t, len(in), n)
 
 		// assert that writer emits partial result until last new line
 		partialAcc += strings.ReplaceAll(in, "\r\n", "\n")
 		lastNL := strings.LastIndex(partialAcc, "\n")
-		must.Eq(t, partialAcc[:lastNL+1], errBuf.String())
+		require.Equal(t, partialAcc[:lastNL+1], errBuf.String())
 	}
 
-	must.Eq(t, "", outBuf.String())
+	require.Empty(t, outBuf.String())
 
 	// note that the \r\n got replaced by \n
 	expectedErr := "some line\nmultiple\nlines\nhere with followup\nand more lines without new line until here\n"
-	must.Eq(t, expectedErr, errBuf.String())
+	require.Equal(t, expectedErr, errBuf.String())
 
 	// close emits the final line
 	err := w.Close()
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	expectedErr += "and thensome more\n"
-	must.Eq(t, expectedErr, errBuf.String())
+	require.Equal(t, expectedErr, errBuf.String())
 }
 
 func Test_extractVarFiles(t *testing.T) {
diff --git a/command/integration_test.go b/command/integration_test.go
index bd3d77a31e10..482e6a1a2181 100644
--- a/command/integration_test.go
+++ b/command/integration_test.go
@@ -14,7 +14,7 @@ import (
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/client/testutil"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestIntegration_Command_NomadInit(t *testing.T) {
@@ -43,6 +43,7 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) {
 	ci.Parallel(t)
 	testutil.DockerCompatible(t)
 
+	assert := assert.New(t)
 	tmpDir := t.TempDir()
 
 	// Start in dev mode so we get a node registration
@@ -52,7 +53,7 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) {
 	{
 		cmd := exec.Command("nomad", "job", "init", "-short")
 		cmd.Dir = tmpDir
-		must.NoError(t, cmd.Run())
+		assert.Nil(cmd.Run())
 	}
 
 	{
@@ -70,16 +71,16 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) {
 		cmd.Dir = tmpDir
 		cmd.Env = []string{fmt.Sprintf("NOMAD_ADDR=%s", url)}
 		out, err := cmd.Output()
-		must.NoError(t, err)
+		assert.Nil(err)
 
 		var req api.JobRegisterRequest
 		dec := json.NewDecoder(bytes.NewReader(out))
-		must.NoError(t, dec.Decode(&req))
+		assert.Nil(dec.Decode(&req))
 
 		var resp api.JobRegisterResponse
 		_, err = client.Raw().Write("/v1/jobs", req, &resp, nil)
-		must.NoError(t, err)
-		must.NotEq(t, "", resp.EvalID)
+		assert.Nil(err)
+		assert.NotZero(resp.EvalID)
 	}
 
 	{
@@ -87,6 +88,6 @@ func TestIntegration_Command_RoundTripJob(t *testing.T) {
 		cmd.Dir = tmpDir
 		cmd.Env = []string{fmt.Sprintf("NOMAD_ADDR=%s", url)}
 		_, err := cmd.Output()
-		must.NoError(t, err)
+		assert.Nil(err)
 	}
 }
diff --git a/command/job_allocs_test.go b/command/job_allocs_test.go
index 4f51eec6fddb..c4d8117a3ba6 100644
--- a/command/job_allocs_test.go
+++ b/command/job_allocs_test.go
@@ -14,6 +14,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJobAllocsCommand_Implements(t *testing.T) {
@@ -32,24 +33,24 @@ func TestJobAllocsCommand_Fails(t *testing.T) {
 	// Fails on misuse
 	code := cmd.Run([]string{"some", "bad", "args"})
 	outerr := ui.ErrorWriter.String()
-	must.One(t, code)
-	must.StrContains(t, outerr, commandErrorText(cmd))
+	require.Equalf(t, 1, code, "expected exit code 1, got: %d", code)
+	require.Containsf(t, outerr, commandErrorText(cmd), "expected help output, got: %s", outerr)
 
 	ui.ErrorWriter.Reset()
 
 	// Bad address
 	code = cmd.Run([]string{"-address=nope", "foo"})
 	outerr = ui.ErrorWriter.String()
-	must.One(t, code)
-	must.StrContains(t, outerr, "Error querying job prefix")
+	require.Equalf(t, 1, code, "expected exit code 1, got: %d", code)
+	require.Containsf(t, outerr, "Error querying job prefix", "expected failed query error, got: %s", outerr)
 
 	ui.ErrorWriter.Reset()
 
 	// Bad job name
 	code = cmd.Run([]string{"-address=" + url, "foo"})
 	outerr = ui.ErrorWriter.String()
-	must.One(t, code)
-	must.StrContains(t, outerr, "No job(s) with prefix or ID \"foo\" found")
+	require.Equalf(t, 1, code, "expected exit 1, got: %d", code)
+	require.Containsf(t, outerr, "No job(s) with prefix or ID \"foo\" found", "expected no job found, got: %s", outerr)
 
 	ui.ErrorWriter.Reset()
 }
@@ -65,13 +66,13 @@ func TestJobAllocsCommand_Run(t *testing.T) {
 	// Create a job without an allocation
 	job := mock.Job()
 	state := srv.Agent.Server().State()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
+	require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
 
 	// Should display no match if the job doesn't have allocations
 	code := cmd.Run([]string{"-address=" + url, job.ID})
 	out := ui.OutputWriter.String()
-	must.Zero(t, code)
-	must.StrContains(t, out, "No allocations placed")
+	require.Equalf(t, 0, code, "expected exit 0, got: %d", code)
+	require.Containsf(t, out, "No allocations placed", "expected no allocations placed, got: %s", out)
 
 	ui.OutputWriter.Reset()
 
@@ -83,15 +84,15 @@ func TestJobAllocsCommand_Run(t *testing.T) {
 	a.Metrics = &structs.AllocMetric{}
 	a.DesiredStatus = structs.AllocDesiredStatusRun
 	a.ClientStatus = structs.AllocClientStatusRunning
-	must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}))
+	require.Nil(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}))
 
 	// Should now display the alloc
 	code = cmd.Run([]string{"-address=" + url, "-verbose", job.ID})
 	out = ui.OutputWriter.String()
 	outerr := ui.ErrorWriter.String()
-	must.Zero(t, code)
-	must.Eq(t, "", outerr)
-	must.StrContains(t, out, a.ID)
+	require.Equalf(t, 0, code, "expected exit 0, got: %d", code)
+	require.Emptyf(t, outerr, "expected no error output, got: \n\n%s", outerr)
+	require.Containsf(t, out, a.ID, "expected alloc output, got: %s", out)
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
@@ -108,7 +109,7 @@ func TestJobAllocsCommand_Template(t *testing.T) {
 	// Create a job
 	job := mock.Job()
 	state := srv.Agent.Server().State()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
+	require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
 
 	// Inject a running allocation
 	a := mock.Alloc()
@@ -118,7 +119,7 @@ func TestJobAllocsCommand_Template(t *testing.T) {
 	a.Metrics = &structs.AllocMetric{}
 	a.DesiredStatus = structs.AllocDesiredStatusRun
 	a.ClientStatus = structs.AllocClientStatusRunning
-	must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}))
+	require.Nil(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 200, []*structs.Allocation{a}))
 
 	// Inject a pending allocation
 	b := mock.Alloc()
@@ -128,16 +129,16 @@ func TestJobAllocsCommand_Template(t *testing.T) {
 	b.Metrics = &structs.AllocMetric{}
 	b.DesiredStatus = structs.AllocDesiredStatusRun
 	b.ClientStatus = structs.AllocClientStatusPending
-	must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{b}))
+	require.Nil(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 300, []*structs.Allocation{b}))
 
 	// Should display an AllocacitonListStub object
 	code := cmd.Run([]string{"-address=" + url, "-t", "'{{printf \"%#+v\" .}}'", job.ID})
 	out := ui.OutputWriter.String()
 	outerr := ui.ErrorWriter.String()
-	must.Zero(t, code)
-	must.Eq(t, "", outerr)
-	must.StrContains(t, out, "api.AllocationListStub")
+	require.Equalf(t, 0, code, "expected exit 0, got: %d", code)
+	require.Emptyf(t, outerr, "expected no error output, got: \n\n%s", outerr)
+	require.Containsf(t, out, "api.AllocationListStub", "expected alloc output, got: %s", out)
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
@@ -147,10 +148,10 @@ func TestJobAllocsCommand_Template(t *testing.T) {
 	out = ui.OutputWriter.String()
 	outerr = ui.ErrorWriter.String()
-	must.Zero(t, code)
-	must.Eq(t, "", outerr)
-	must.StrContains(t, out, a.ID)
-	must.StrNotContains(t, out, b.ID)
+	require.Equalf(t, 0, code, "expected exit 0, got: %d", code)
+	require.Emptyf(t, outerr, "expected no error output, got: \n\n%s", outerr)
+	require.Containsf(t, out, a.ID, "expected ID of alloc a, got: %s", out)
+	require.NotContainsf(t, out, b.ID, "should not contain ID of alloc b, got: %s", out)
 
 	ui.OutputWriter.Reset()
 	ui.ErrorWriter.Reset()
@@ -167,15 +168,15 @@ func TestJobAllocsCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	require.Equal(t, 1, len(res))
+	require.Equal(t, j.ID, res[0])
 }
 
 func TestJobAllocsCommand_ACL(t *testing.T) {
diff --git a/command/job_deployments_test.go b/command/job_deployments_test.go
index ab1f43366756..21148dc3e726 100644
--- a/command/job_deployments_test.go
+++ b/command/job_deployments_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestJobDeploymentsCommand_Implements(t *testing.T) {
@@ -48,6 +49,7 @@ func TestJobDeploymentsCommand_Fails(t *testing.T) {
 
 func TestJobDeploymentsCommand_Run(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -62,7 +64,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) {
 	// Create a job without a deployment
 	job := mock.Job()
 	state := srv.Agent.Server().State()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
 
 	// Should display no match if the job doesn't have deployments
 	if code := cmd.Run([]string{"-address=" + url, job.ID}); code != 0 {
@@ -77,7 +79,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) {
 	d := mock.Deployment()
 	d.JobID = job.ID
 	d.JobCreateIndex = job.CreateIndex
-	must.NoError(t, state.UpsertDeployment(200, d))
+	assert.Nil(state.UpsertDeployment(200, d))
 
 	// Should now display the deployment
 	if code := cmd.Run([]string{"-address=" + url, "-verbose", job.ID}); code != 0 {
@@ -91,7 +93,7 @@ func TestJobDeploymentsCommand_Run(t *testing.T) {
 
 func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
 	ci.Parallel(t)
-
+	assert := assert.New(t)
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -106,7 +108,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
 	// Create a job without a deployment
 	job := mock.Job()
 	state := srv.Agent.Server().State()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 100, nil, job))
 
 	// Should display no match if the job doesn't have deployments
 	if code := cmd.Run([]string{"-address=" + url, "-latest", job.ID}); code != 0 {
@@ -121,7 +123,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
 	d := mock.Deployment()
 	d.JobID = job.ID
 	d.JobCreateIndex = job.CreateIndex
-	must.NoError(t, state.UpsertDeployment(200, d))
+	assert.Nil(state.UpsertDeployment(200, d))
 
 	// Should now display the deployment
 	if code := cmd.Run([]string{"-address=" + url, "-verbose", "-latest", job.ID}); code != 0 {
@@ -135,6 +137,7 @@ func TestJobDeploymentsCommand_Run_Latest(t *testing.T) {
 
 func TestJobDeploymentsCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -145,15 +148,15 @@ func TestJobDeploymentsCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobDeploymentsCommand_ACL(t *testing.T) {
diff --git a/command/job_dispatch_test.go b/command/job_dispatch_test.go
index d5e65796a25f..bbd4d55ad6a8 100644
--- a/command/job_dispatch_test.go
+++ b/command/job_dispatch_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJobDispatchCommand_Implements(t *testing.T) {
@@ -66,7 +67,7 @@ func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
@@ -74,12 +75,12 @@ func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) {
 
 	// No parameterized jobs, should be 0 results
 	res := predictor.Predict(args)
-	must.SliceEmpty(t, res)
+	require.Equal(t, 0, len(res))
 
 	// Create a fake parameterized job
 	j1 := mock.Job()
 	j1.ParameterizedJob = &structs.ParameterizedJobConfig{}
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 2000, nil, j1))
+	require.Nil(t, state.UpsertJob(structs.MsgTypeTestSetup, 2000, nil, j1))
 
 	prefix = j1.ID[:len(j1.ID)-5]
 	args = complete.Args{Last: prefix}
@@ -87,8 +88,8 @@ func TestJobDispatchCommand_AutocompleteArgs(t *testing.T) {
 
 	// Should return 1 parameterized job
 	res = predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j1.ID, res[0])
+	require.Equal(t, 1, len(res))
+	require.Equal(t, j1.ID, res[0])
 }
 
 func TestJobDispatchCommand_ACL(t *testing.T) {
diff --git a/command/job_eval_test.go b/command/job_eval_test.go
index fd081b9ee971..d16e1fc12ffb 100644
--- a/command/job_eval_test.go
+++ b/command/job_eval_test.go
@@ -17,6 +17,8 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJobEvalCommand_Implements(t *testing.T) {
@@ -72,16 +74,17 @@ func TestJobEvalCommand_Run(t *testing.T) {
 
 	ui := cli.NewMockUi()
 	cmd := &JobEvalCommand{Meta: Meta{Ui: ui}}
+	require := require.New(t)
 
 	state := srv.Agent.Server().State()
 
 	// Create a job
 	job := mock.Job()
 	err := state.UpsertJob(structs.MsgTypeTestSetup, 11, nil, job)
-	must.NoError(t, err)
+	require.Nil(err)
 
 	job, err = state.JobByID(nil, structs.DefaultNamespace, job.ID)
-	must.NoError(t, err)
+	require.Nil(err)
 
 	// Create a failed alloc for the job
 	alloc := mock.Alloc()
@@ -91,7 +94,7 @@ func TestJobEvalCommand_Run(t *testing.T) {
 	alloc.Namespace = job.Namespace
 	alloc.ClientStatus = structs.AllocClientStatusFailed
 	err = state.UpsertAllocs(structs.MsgTypeTestSetup, 12, []*structs.Allocation{alloc})
-	must.NoError(t, err)
+	require.Nil(err)
 
 	if code := cmd.Run([]string{"-address=" + url, "-force-reschedule", "-detach", job.ID}); code != 0 {
 		t.Fatalf("expected exit 0, got: %d", code)
@@ -99,14 +102,15 @@ func TestJobEvalCommand_Run(t *testing.T) {
 	}
 
 	// Lookup alloc again
 	alloc, err = state.AllocByID(nil, alloc.ID)
-	must.NotNil(t, alloc)
-	must.Nil(t, err)
-	must.True(t, *alloc.DesiredTransition.ForceReschedule)
+	require.NotNil(alloc)
+	require.Nil(err)
+	require.True(*alloc.DesiredTransition.ForceReschedule)
 }
 
 func TestJobEvalCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -117,15 +121,15 @@ func TestJobEvalCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobEvalCommand_ACL(t *testing.T) {
diff --git a/command/job_history_test.go b/command/job_history_test.go
index d4be11e87901..5a4cf64bcfa7 100644
--- a/command/job_history_test.go
+++ b/command/job_history_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestJobHistoryCommand_Implements(t *testing.T) {
@@ -47,6 +48,7 @@ func TestJobHistoryCommand_Fails(t *testing.T) {
 
 func TestJobHistoryCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -57,15 +59,15 @@ func TestJobHistoryCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobHistoryCommand_ACL(t *testing.T) {
diff --git a/command/job_init_test.go b/command/job_init_test.go
index 556d66539f43..e40813e7d843 100644
--- a/command/job_init_test.go
+++ b/command/job_init_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/hashicorp/nomad/command/asset"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestInitCommand_Implements(t *testing.T) {
@@ -63,12 +64,12 @@ func TestInitCommand_Run(t *testing.T) {
 	// Works with -short flag
 	os.Remove(DefaultInitName)
 	if code := cmd.Run([]string{"-short"}); code != 0 {
-		must.Zero(t, code)
+		require.Zero(t, code, "unexpected exit code: %d", code)
 	}
 	content, err = os.ReadFile(DefaultInitName)
-	must.NoError(t, err)
+	require.NoError(t, err)
 	shortJob := asset.JobExampleShort
-	must.Eq(t, string(content), string(shortJob))
+	require.Equal(t, string(content), string(shortJob))
 
 	// Fails if the file exists
 	if code := cmd.Run([]string{}); code != 1 {
@@ -203,12 +204,12 @@ func TestInitCommand_customFilename(t *testing.T) {
 	// Works with -short flag
 	os.Remove(filename)
 	if code := cmd.Run([]string{"-short", filename}); code != 0 {
-		must.Zero(t, code)
+		require.Zero(t, code, "unexpected exit code: %d", code)
 	}
 	content, err = os.ReadFile(filename)
-	must.NoError(t, err)
+	require.NoError(t, err)
 	shortJob := asset.JobExampleShort
-	must.Eq(t, string(content), string(shortJob))
+	require.Equal(t, string(content), string(shortJob))
 
 	// Fails if the file exists
 	if code := cmd.Run([]string{filename}); code != 1 {
diff --git a/command/job_inspect_test.go b/command/job_inspect_test.go
index 9f33817a85f8..c86f733c4964 100644
--- a/command/job_inspect_test.go
+++ b/command/job_inspect_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestInspectCommand_Implements(t *testing.T) {
@@ -68,6 +69,7 @@ func TestInspectCommand_Fails(t *testing.T) {
 
 func TestInspectCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -77,15 +79,15 @@ func TestInspectCommand_AutocompleteArgs(t *testing.T) {
 
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobInspectCommand_ACL(t *testing.T) {
diff --git a/command/job_periodic_force_test.go b/command/job_periodic_force_test.go
index 730240ed4b27..8ce8a867b0b9 100644
--- a/command/job_periodic_force_test.go
+++ b/command/job_periodic_force_test.go
@@ -17,6 +17,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJobPeriodicForceCommand_Implements(t *testing.T) {
@@ -31,15 +32,15 @@ func TestJobPeriodicForceCommand_Fails(t *testing.T) {
 
 	// Fails on misuse
 	code := cmd.Run([]string{"some", "bad", "args"})
-	must.One(t, code)
+	require.Equal(t, code, 1, "expected error")
 	out := ui.ErrorWriter.String()
-	must.StrContains(t, out, commandErrorText(cmd))
+	require.Contains(t, out, commandErrorText(cmd), "expected help output")
 	ui.ErrorWriter.Reset()
 
 	code = cmd.Run([]string{"-address=nope", "12"})
-	must.One(t, code)
+	require.Equal(t, code, 1, "expected error")
 	out = ui.ErrorWriter.String()
-	must.StrContains(t, out, "Error querying job prefix")
+	require.Contains(t, out, "Error querying job prefix", "expected force error")
 }
 
 func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) {
@@ -54,12 +55,12 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job, not periodic
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(complete.Args{Last: j.ID[:len(j.ID)-5]})
-	must.SliceEmpty(t, res)
+	require.Empty(t, res)
 
 	// Create another fake job, periodic
 	state = srv.Agent.Server().State()
@@ -71,13 +72,13 @@ func TestJobPeriodicForceCommand_AutocompleteArgs(t *testing.T) {
 		ProhibitOverlap: true,
 		TimeZone:        "test zone",
 	}
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j2))
+	require.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j2))
 
 	res = predictor.Predict(complete.Args{Last: j2.ID[:len(j.ID)-5]})
-	must.Eq(t, []string{j2.ID}, res)
+	require.Equal(t, []string{j2.ID}, res)
 
 	res = predictor.Predict(complete.Args{})
-	must.Eq(t, []string{j2.ID}, res)
+	require.Equal(t, []string{j2.ID}, res)
 }
 
 func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) {
@@ -97,7 +98,7 @@ func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) {
 		}
 		return true, nil
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(t, err)
 	})
 
 	// Register a job
@@ -107,14 +108,14 @@ func TestJobPeriodicForceCommand_NonPeriodicJob(t *testing.T) {
 	cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui, flagAddress: url}}
 
 	resp, _, err := client.Jobs().Register(j, nil)
-	must.NoError(t, err)
+	require.NoError(t, err)
 	code := waitForSuccess(ui, client, fullId, t, resp.EvalID)
-	must.Zero(t, code)
+	require.Equal(t, 0, code)
 
 	code = cmd.Run([]string{"-address=" + url, "job_not_periodic"})
-	must.One(t, code)
+	require.Equal(t, 1, code, "expected exit code")
 	out := ui.ErrorWriter.String()
-	must.StrContains(t, out, "No periodic job(s)")
+	require.Contains(t, out, "No periodic job(s)", "non-periodic error message")
 }
 
 func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) {
@@ -134,7 +135,7 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) {
 		}
 		return true, nil
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(t, err)
 	})
 
 	// Register a job
@@ -150,13 +151,13 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForceDetach(t *testing.T) {
 	cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui, flagAddress: url}}
 
 	_, _, err := client.Jobs().Register(j, nil)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	code := cmd.Run([]string{"-address=" + url, "-detach", "job1_is_periodic"})
-	must.Zero(t, code)
+	require.Equal(t, 0, code, "expected no error code")
 
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "Force periodic successful")
-	must.StrContains(t, out, "Evaluation ID:")
+	require.Contains(t, out, "Force periodic successful")
+	require.Contains(t, out, "Evaluation ID:")
 }
 
 func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) {
@@ -176,7 +177,7 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) {
 		}
 		return true, nil
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(t, err)
 	})
 
 	// Register a job
@@ -192,13 +193,13 @@ func TestJobPeriodicForceCommand_SuccessfulPeriodicForce(t *testing.T) {
 	cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui, flagAddress: url}}
 
 	_, _, err := client.Jobs().Register(j, nil)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	code := cmd.Run([]string{"-address=" + url, "job2_is_periodic"})
-	must.Zero(t, code)
+	require.Equal(t, 0, code, "expected no error code")
 
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "Monitoring evaluation")
-	must.StrContains(t, out, "finished with status \"complete\"")
+	require.Contains(t, out, "Monitoring evaluation")
+	require.Contains(t, out, "finished with status \"complete\"")
 }
 
 func TestJobPeriodicForceCommand_SuccessfulIfJobIDEqualsPrefix(t *testing.T) {
@@ -218,7 +219,7 @@ func TestJobPeriodicForceCommand_SuccessfulIfJobIDEqualsPrefix(t *testing.T) {
 		}
 		return true, nil
 	}, func(err error) {
-		must.NoError(t, err)
+		require.NoError(t, err)
 	})
 
 	j1 := testJob("periodic-prefix")
@@ -240,15 +241,15 @@ func TestJobPeriodicForceCommand_SuccessfulIfJobIDEqualsPrefix(t *testing.T) {
 	cmd := &JobPeriodicForceCommand{Meta: Meta{Ui: ui, flagAddress: url}}
 
 	_, _, err := client.Jobs().Register(j1, nil)
-	must.NoError(t, err)
+	require.NoError(t, err)
 	_, _, err = client.Jobs().Register(j2, nil)
-	must.NoError(t, err)
+	require.NoError(t, err)
 
 	code := cmd.Run([]string{"-address=" + url, "periodic-prefix"})
-	must.Zero(t, code)
+	require.Equal(t, 0, code, "expected no error code")
 
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "Monitoring evaluation")
-	must.StrContains(t, out, "finished with status \"complete\"")
+	require.Contains(t, out, "Monitoring evaluation")
+	require.Contains(t, out, "finished with status \"complete\"")
 }
 
 func TestJobPeriodicForceCommand_ACL(t *testing.T) {
diff --git a/command/job_plan_test.go b/command/job_plan_test.go
index 81f3bc315c68..343768b9b3a2 100644
--- a/command/job_plan_test.go
+++ b/command/job_plan_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestPlanCommand_Implements(t *testing.T) {
@@ -131,7 +132,7 @@ func TestPlanCommand_hcl1_hcl2_strict(t *testing.T) {
 		})
 		// Exit code 1 here means that an alloc will be created, which is
 		// expected.
-		must.One(t, got)
+		require.Equal(t, 1, got)
 	})
 }
 
@@ -198,7 +199,7 @@ func TestPlanCommand_From_Files(t *testing.T) {
 		cmd := &JobPlanCommand{Meta: Meta{Ui: ui}}
 		args := []string{"-address", "http://" + s.HTTPAddr, "testdata/example-basic.nomad"}
 		code := cmd.Run(args)
-		must.One(t, code) // no client running, fail to place
+		require.Equal(t, 1, code) // no client running, fail to place
 		must.StrContains(t, ui.OutputWriter.String(), "WARNING: Failed to place all allocations.")
 	})
 
@@ -252,6 +253,7 @@ func TestPlanCommand_Preemptions(t *testing.T) {
 	ci.Parallel(t)
 	ui := cli.NewMockUi()
 	cmd := &JobPlanCommand{Meta: Meta{Ui: ui}}
+	require := require.New(t)
 
 	// Only one preempted alloc
 	resp1 := &api.JobPlanResponse{
@@ -269,8 +271,8 @@ func TestPlanCommand_Preemptions(t *testing.T) {
 	}
 	cmd.addPreemptions(resp1)
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "Alloc ID")
-	must.StrContains(t, out, "alloc1")
+	require.Contains(out, "Alloc ID")
+	require.Contains(out, "alloc1")
 
 	// Less than 10 unique job ids
 	var preemptedAllocs []*api.AllocationListStub
@@ -294,8 +296,8 @@ func TestPlanCommand_Preemptions(t *testing.T) {
 	ui.OutputWriter.Reset()
 	cmd.addPreemptions(resp2)
 	out = ui.OutputWriter.String()
-	must.StrContains(t, out, "Job ID")
-	must.StrContains(t, out, "Namespace")
+	require.Contains(out, "Job ID")
+	require.Contains(out, "Namespace")
 
 	// More than 10 unique job IDs
 	preemptedAllocs = make([]*api.AllocationListStub, 0)
@@ -325,9 +327,9 @@ func TestPlanCommand_Preemptions(t *testing.T) {
 	ui.OutputWriter.Reset()
 	cmd.addPreemptions(resp3)
 	out = ui.OutputWriter.String()
-	must.StrContains(t, out, "Job Type")
-	must.StrContains(t, out, "batch")
-	must.StrContains(t, out, "service")
+	require.Contains(out, "Job Type")
+	require.Contains(out, "batch")
+	require.Contains(out, "service")
 }
 
 func TestPlanCommand_JSON(t *testing.T) {
@@ -342,6 +344,6 @@ func TestPlanCommand_JSON(t *testing.T) {
 		"testdata/example-short.json",
 	}
 	code := cmd.Run(args)
-	must.Eq(t, 255, code)
-	must.StrContains(t, ui.ErrorWriter.String(), "Error during plan: Put")
+	require.Equal(t, 255, code)
+	require.Contains(t, ui.ErrorWriter.String(), "Error during plan: Put")
 }
diff --git a/command/job_promote_test.go b/command/job_promote_test.go
index 299bb370438a..f3d676498bd0 100644
--- a/command/job_promote_test.go
+++ b/command/job_promote_test.go
@@ -10,11 +10,13 @@ import (
 	"github.com/hashicorp/nomad/api"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/command/agent"
-	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
+	"github.com/shoenig/test/must"
+
+	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestJobPromoteCommand_Implements(t *testing.T) {
@@ -47,6 +49,7 @@ func TestJobPromoteCommand_Fails(t *testing.T) {
 
 func TestJobPromoteCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -57,15 +60,15 @@ func TestJobPromoteCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobPromoteCommand_ACL(t *testing.T) {
diff --git a/command/job_restart_test.go b/command/job_restart_test.go
index 4104fd87c39b..1af1c76f6737 100644
--- a/command/job_restart_test.go
+++ b/command/job_restart_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/hashicorp/nomad/helper/pointer"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
+	"github.com/shoenig/test/must"
 	"github.com/shoenig/test/wait"
 )
diff --git a/command/job_revert_test.go b/command/job_revert_test.go
index b55c48373a97..0f9472d27929 100644
--- a/command/job_revert_test.go
+++ b/command/job_revert_test.go
@@ -15,6 +15,7 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestJobRevertCommand_Implements(t *testing.T) {
@@ -47,6 +48,7 @@ func TestJobRevertCommand_Fails(t *testing.T) {
 
 func TestJobRevertCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -57,15 +59,15 @@ func TestJobRevertCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobRevertCommand_ACL(t *testing.T) {
diff --git a/command/job_run_test.go b/command/job_run_test.go
index cdb056831ede..985fefd4eef2 100644
--- a/command/job_run_test.go
+++ b/command/job_run_test.go
@@ -15,7 +15,7 @@ import (
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 var _ cli.Command = (*JobRunCommand)(nil)
@@ -70,7 +70,7 @@ func TestRunCommand_hcl1_hcl2_strict(t *testing.T) {
 			"-detach", "asset/example-short.nomad.hcl",
 		})
-		must.Zero(t, got)
+		require.Equal(t, 0, got, ui.ErrorWriter.String())
 	})
 }
@@ -261,13 +261,13 @@ func TestRunCommand_JSON(t *testing.T) {
 
 	// First convert HCL -> JSON with -output
 	stdout, stderr, code := run("-output", "asset/example-short.nomad.hcl")
-	must.Zero(t, code)
-	must.Eq(t, "", stderr)
-	must.NotEq(t, "", stdout)
+	require.Zero(t, code, stderr)
+	require.Empty(t, stderr)
+	require.NotEmpty(t, stdout)
 	t.Logf("run -output==> %s...", stdout[:12])
 
 	jsonFile := filepath.Join(t.TempDir(), "redis.json")
-	must.NoError(t, os.WriteFile(jsonFile, []byte(stdout), 0o640))
+	require.NoError(t, os.WriteFile(jsonFile, []byte(stdout), 0o640))
 
 	// Wait for agent to start and get its address
 	addr := ""
@@ -279,23 +279,23 @@ func TestRunCommand_JSON(t *testing.T) {
 
 	// Submit JSON
 	stdout, stderr, code = run("-detach", "-address", addr, "-json", jsonFile)
-	must.Zero(t, code)
-	must.Eq(t, "", stderr)
+	require.Zero(t, code, stderr)
+	require.Empty(t, stderr)
 
 	// Read the JSON from the API as it omits the Job envelope and
 	// therefore differs from -output
 	resp, err := http.Get(addr + "/v1/job/example")
-	must.NoError(t, err)
+	require.NoError(t, err)
 	buf, err := io.ReadAll(resp.Body)
-	must.NoError(t, err)
-	must.NoError(t, resp.Body.Close())
-	must.SliceNotEmpty(t, buf)
+	require.NoError(t, err)
+	require.NoError(t, resp.Body.Close())
+	require.NotEmpty(t, buf)
 	t.Logf("/v1/job/example==> %s...", string(buf[:12]))
-	must.NoError(t, os.WriteFile(jsonFile, buf, 0o640))
+	require.NoError(t, os.WriteFile(jsonFile, buf, 0o640))
 
 	// Submit JSON
 	stdout, stderr, code = run("-detach", "-address", addr, "-json", jsonFile)
-	must.Zero(t, code)
-	must.Eq(t, "", stderr)
-	must.NotEq(t, "", stdout)
+	require.Zerof(t, code, "stderr: %s\njson: %s\n", stderr, string(buf))
+	require.Empty(t, stderr)
+	require.NotEmpty(t, stdout)
 }
diff --git a/command/job_status_test.go b/command/job_status_test.go
index 90800a527867..7441ff066e67 100644
--- a/command/job_status_test.go
+++ b/command/job_status_test.go
@@ -19,6 +19,8 @@ import (
 	"github.com/mitchellh/cli"
 	"github.com/posener/complete"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
 )
 
 func TestJobStatusCommand_Implements(t *testing.T) {
@@ -164,7 +166,7 @@ func TestJobStatusCommand_Run(t *testing.T) {
 	nodeNameHeaderStr := "Node Name"
 	nodeNameHeaderIndex := strings.Index(allocationsTableStr, nodeNameHeaderStr)
 	nodeNameRegexpStr := fmt.Sprintf(`.*%s.*\n.{%d}%s`, nodeNameHeaderStr, nodeNameHeaderIndex, regexp.QuoteMeta(nodeName))
-	must.RegexMatch(t, regexp.MustCompile(nodeNameRegexpStr), out)
+	require.Regexp(t, regexp.MustCompile(nodeNameRegexpStr), out)
 
 	ui.ErrorWriter.Reset()
 	ui.OutputWriter.Reset()
@@ -252,6 +254,7 @@ func TestJobStatusCommand_Fails(t *testing.T) {
 
 func TestJobStatusCommand_AutocompleteArgs(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	srv, _, url := testServer(t, true, nil)
 	defer srv.Shutdown()
@@ -262,19 +265,20 @@ func TestJobStatusCommand_AutocompleteArgs(t *testing.T) {
 	// Create a fake job
 	state := srv.Agent.Server().State()
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
+	assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j))
 
 	prefix := j.ID[:len(j.ID)-5]
 	args := complete.Args{Last: prefix}
 	predictor := cmd.AutocompleteArgs()
 
 	res := predictor.Predict(args)
-	must.SliceLen(t, 1, res)
-	must.Eq(t, j.ID, res[0])
+	assert.Equal(1, len(res))
+	assert.Equal(j.ID, res[0])
 }
 
 func TestJobStatusCommand_WithAccessPolicy(t *testing.T) {
 	ci.Parallel(t)
+	assert := assert.New(t)
 
 	config := func(c *agent.Config) {
 		c.ACL.Enabled = true
@@ -285,7 +289,7 @@ func TestJobStatusCommand_WithAccessPolicy(t *testing.T) {
 
 	// Bootstrap an initial ACL token
 	token := srv.RootToken
-	must.NotNil(t, token)
+	assert.NotNil(token, "failed to bootstrap ACL token")
 
 	// Wait for client ready
 	client.SetSecretID(token.SecretID)
@@ -316,22 +320,22 @@ func TestJobStatusCommand_WithAccessPolicy(t *testing.T) {
 	// registering a job without a token fails
 	client.SetSecretID(invalidToken.SecretID)
 	resp, _, err := client.Jobs().Register(j, nil)
-	must.NotNil(t, err)
+	assert.NotNil(err)
 
 	// registering a job with a valid token succeeds
 	client.SetSecretID(token.SecretID)
 	resp, _, err = client.Jobs().Register(j, nil)
-	must.NoError(t, err)
+	assert.Nil(err)
 	code := waitForSuccess(ui, client, fullId, t, resp.EvalID)
-	must.Zero(t, code)
+	assert.Equal(0, code)
 
 	// Request Job List without providing a valid token
 	code = cmd.Run([]string{"-address=" + url, "-token=" + invalidToken.SecretID, "-short"})
-	must.One(t, code)
+	assert.Equal(1, code)
 
 	// Request Job List with a valid token
 	code = cmd.Run([]string{"-address=" + url, "-token=" + token.SecretID, "-short"})
-	must.Zero(t, code)
+	assert.Equal(0, code)
 
 	out := ui.OutputWriter.String()
 	if !strings.Contains(out, *j.ID) {
@@ -363,15 +367,16 @@ func TestJobStatusCommand_RescheduleEvals(t *testing.T) {
 
 	ui := cli.NewMockUi()
 	cmd := &JobStatusCommand{Meta: Meta{Ui: ui, flagAddress: url}}
+	require := require.New(t)
 
 	state := srv.Agent.Server().State()
 
 	// Create state store objects for job, alloc and followup eval with a future WaitUntil value
 	j := mock.Job()
-	must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 900, nil, j))
+	require.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 900, nil, j))
 	e := mock.Eval()
 	e.WaitUntil = time.Now().Add(1 * time.Hour)
-	must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 902, []*structs.Evaluation{e}))
+	require.Nil(state.UpsertEvals(structs.MsgTypeTestSetup, 902, []*structs.Evaluation{e}))
 	a := mock.Alloc()
 	a.Job = j
 	a.JobID = j.ID
@@ -380,15 +385,15 @@ func TestJobStatusCommand_RescheduleEvals(t *testing.T) {
 	a.Metrics = &structs.AllocMetric{}
 	a.DesiredStatus = structs.AllocDesiredStatusRun
 	a.ClientStatus = structs.AllocClientStatusRunning
-	must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a}))
+	require.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{a}))
 
 	// Query jobs with prefix match
 	if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 {
 		t.Fatalf("expected exit 0, got: %d", code)
 	}
 	out := ui.OutputWriter.String()
-	must.StrContains(t, out, "Future Rescheduling Attempts")
-	must.StrContains(t, out, e.ID[:8])
+	require.Contains(out, "Future Rescheduling Attempts")
+	require.Contains(out, e.ID[:8])
 }
 
 func TestJobStatusCommand_ACL(t *testing.T) {
diff --git a/command/job_validate_test.go b/command/job_validate_test.go
index 6adc2ae3031d..c09b9e8f5938 100644
--- a/command/job_validate_test.go
+++ b/command/job_validate_test.go
@@ -12,7 +12,7 @@ import (
 	"github.com/hashicorp/nomad/helper/pointer"
 	"github.com/hashicorp/nomad/testutil"
 	"github.com/mitchellh/cli"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func TestValidateCommand_Implements(t *testing.T) {
TestValidateCommand_Implements(t *testing.T) { @@ -40,7 +40,7 @@ func TestValidateCommand_Files(t *testing.T) { cmd := &JobValidateCommand{Meta: Meta{Ui: ui, flagAddress: "http://" + s.HTTPAddr}} args := []string{"testdata/example-basic.nomad"} code := cmd.Run(args) - must.Zero(t, code) + require.Equal(t, 0, code) }) t.Run("vault no token", func(t *testing.T) { @@ -48,8 +48,8 @@ func TestValidateCommand_Files(t *testing.T) { cmd := &JobValidateCommand{Meta: Meta{Ui: ui}} args := []string{"-address", "http://" + s.HTTPAddr, "testdata/example-vault.nomad"} code := cmd.Run(args) - must.StrContains(t, ui.ErrorWriter.String(), "* Vault used in the job but missing Vault token") - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "* Vault used in the job but missing Vault token") + require.Equal(t, 1, code) }) t.Run("vault bad token via flag", func(t *testing.T) { @@ -57,8 +57,8 @@ func TestValidateCommand_Files(t *testing.T) { cmd := &JobValidateCommand{Meta: Meta{Ui: ui}} args := []string{"-address", "http://" + s.HTTPAddr, "-vault-token=abc123", "testdata/example-vault.nomad"} code := cmd.Run(args) - must.StrContains(t, ui.ErrorWriter.String(), "* bad token") - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "* bad token") + require.Equal(t, 1, code) }) t.Run("vault token bad via env", func(t *testing.T) { @@ -67,8 +67,8 @@ func TestValidateCommand_Files(t *testing.T) { cmd := &JobValidateCommand{Meta: Meta{Ui: ui}} args := []string{"-address", "http://" + s.HTTPAddr, "testdata/example-vault.nomad"} code := cmd.Run(args) - must.StrContains(t, ui.ErrorWriter.String(), "* bad token") - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "* bad token") + require.Equal(t, 1, code) }) } func TestValidateCommand_hcl1_hcl2_strict(t *testing.T) { @@ -84,7 +84,7 @@ func TestValidateCommand_hcl1_hcl2_strict(t *testing.T) { "-address", addr, "asset/example-short.nomad.hcl", }) - must.Zero(t, got) + require.Equal(t, 0, got, ui.ErrorWriter.String()) }) } @@ -221,9 +221,11 @@ func TestValidateCommand_JSON(t *testing.T) { code := cmd.Run([]string{"-address", addr, "-json", "testdata/example-short.json"}) - must.Zero(t, code) + require.Zerof(t, code, "stdout: %s\nstdout: %s\n", + ui.OutputWriter.String(), ui.ErrorWriter.String()) code = cmd.Run([]string{"-address", addr, "-json", "testdata/example-short-bad.json"}) - must.One(t, code) + require.Equalf(t, 1, code, "stdout: %s\nstdout: %s\n", + ui.OutputWriter.String(), ui.ErrorWriter.String()) } diff --git a/command/license_get_test.go b/command/license_get_test.go index fed0d4594403..60d3fce7e347 100644 --- a/command/license_get_test.go +++ b/command/license_get_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) var _ cli.Command = &LicenseGetCommand{} @@ -26,10 +26,10 @@ func TestCommand_LicenseGet_OSSErr(t *testing.T) { code := cmd.Run([]string{"-address=" + url}) if srv.Enterprise { - must.Zero(t, code) + require.Equal(t, 0, code) } else { - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") } } @@ -55,9 +55,9 @@ func TestOutputLicenseReply(t *testing.T) { ui := cli.NewMockUi() - must.Zero(t, OutputLicenseReply(ui, lic)) + require.Equal(t, 0, OutputLicenseReply(ui, lic)) out := ui.OutputWriter.String() - 
must.StrContains(t, out, "Customer ID") - must.StrContains(t, out, "License ID") + require.Contains(t, out, "Customer ID") + require.Contains(t, out, "License ID") } diff --git a/command/meta_test.go b/command/meta_test.go index dd07837dcf9d..6a8d0d43820f 100644 --- a/command/meta_test.go +++ b/command/meta_test.go @@ -16,6 +16,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestMeta_FlagSet(t *testing.T) { @@ -137,7 +138,7 @@ func TestMeta_Colorize(t *testing.T) { t.Run(tc.Name, func(t *testing.T) { // Create fake test terminal. _, tty, err := pty.Open() - must.NoError(t, err) + require.NoError(t, err) defer tty.Close() oldStdout := os.Stdout @@ -154,7 +155,7 @@ func TestMeta_Colorize(t *testing.T) { tc.SetupFn(t, m) } - must.Eq(t, !tc.ExpectColor, m.Colorize().Disable) + require.Equal(t, !tc.ExpectColor, m.Colorize().Disable) }) } } diff --git a/command/monitor_test.go b/command/monitor_test.go index 1466b20b233d..932d3e9715fb 100644 --- a/command/monitor_test.go +++ b/command/monitor_test.go @@ -16,6 +16,7 @@ import ( "github.com/mitchellh/cli" "github.com/shoenig/test/must" "github.com/shoenig/test/wait" + "github.com/stretchr/testify/require" ) func TestMonitor_Update_Eval(t *testing.T) { @@ -378,7 +379,7 @@ node-3 0 0 0 4 3 for _, tc := range tests { t.Run(tc.Name, func(t *testing.T) { got := formatAllocMetrics(tc.Metrics, true, "") - must.Eq(t, strings.TrimSpace(tc.Expected), got) + require.Equal(t, strings.TrimSpace(tc.Expected), got) }) } } diff --git a/command/namespace_apply_test.go b/command/namespace_apply_test.go index a93518f16f2a..2699e128587f 100644 --- a/command/namespace_apply_test.go +++ b/command/namespace_apply_test.go @@ -11,6 +11,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNamespaceApplyCommand_Implements(t *testing.T) { @@ -58,8 +59,8 @@ func TestNamespaceApplyCommand_Good(t *testing.T) { } namespaces, _, err := client.Namespaces().List(nil) - must.NoError(t, err) - must.SliceLen(t, 2, namespaces) + assert.Nil(t, err) + assert.Len(t, namespaces, 2) } func TestNamespaceApplyCommand_parseNamesapceSpec(t *testing.T) { diff --git a/command/namespace_delete_test.go b/command/namespace_delete_test.go index d0bc5a06a113..a87977e6fa22 100644 --- a/command/namespace_delete_test.go +++ b/command/namespace_delete_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNamespaceDeleteCommand_Implements(t *testing.T) { @@ -57,7 +57,7 @@ func TestNamespaceDeleteCommand_Good(t *testing.T) { Name: "foo", } _, err := client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(t, err) // Delete a namespace if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 { @@ -65,12 +65,13 @@ func TestNamespaceDeleteCommand_Good(t *testing.T) { } namespaces, _, err := client.Namespaces().List(nil) - must.NoError(t, err) - must.Len(t, 1, namespaces) + assert.Nil(t, err) + assert.Len(t, namespaces, 1) } func TestNamespaceDeleteCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -83,12 +84,12 @@ func TestNamespaceDeleteCommand_AutocompleteArgs(t *testing.T) { Name: "diddo", } _, err := 
client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(err) args := complete.Args{Last: "d"} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, ns.Name, res[0]) + assert.Equal(1, len(res)) + assert.Equal(ns.Name, res[0]) } diff --git a/command/namespace_inspect_test.go b/command/namespace_inspect_test.go index f7554db7ff9c..fc6851df9e6f 100644 --- a/command/namespace_inspect_test.go +++ b/command/namespace_inspect_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNamespaceInspectCommand_Implements(t *testing.T) { @@ -57,7 +57,7 @@ func TestNamespaceInspectCommand_Good(t *testing.T) { Name: "foo", } _, err := client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(t, err) // Inspect if code := cmd.Run([]string{"-address=" + url, ns.Name}); code != 0 { @@ -72,6 +72,7 @@ func TestNamespaceInspectCommand_Good(t *testing.T) { func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -84,14 +85,14 @@ func TestNamespaceInspectCommand_AutocompleteArgs(t *testing.T) { Name: "foo", } _, err := client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(err) args := complete.Args{Last: "f"} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, ns.Name, res[0]) + assert.Equal(1, len(res)) + assert.Equal(ns.Name, res[0]) } // This test should demonstrate the behavior of a namespace @@ -111,24 +112,27 @@ func TestNamespaceInspectCommand_NamespaceMatchesPrefix(t *testing.T) { // Create a namespace that uses foo as a prefix ns := &api.Namespace{Name: "fooBar"} _, err := client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(t, err) // Create a foo namespace ns2 := &api.Namespace{Name: "foo"} _, err = client.Namespaces().Register(ns2, nil) - must.NoError(t, err) + assert.Nil(t, err) // Add a namespace afterwards to prevent the sort from creating // false successes ns = &api.Namespace{Name: "fooBaz"} _, err = client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(t, err) // Check status on namespace code := cmd.Run([]string{"-address=" + url, ns2.Name}) - must.Zero(t, code) - + if code != 0 { + t.Fatalf("expected exit 0, got: %d; %v", code, ui.ErrorWriter.String()) + } // Check to ensure we got the proper foo out := ui.OutputWriter.String() - must.StrContains(t, out, "\"foo\",\n") + if !strings.Contains(out, "\"foo\",\n") { + t.Fatalf("expected namespace foo, got: %s", out) + } } diff --git a/command/namespace_status_test.go b/command/namespace_status_test.go index a1264d48a98c..7495f68be2d0 100644 --- a/command/namespace_status_test.go +++ b/command/namespace_status_test.go @@ -13,6 +13,7 @@ import ( "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNamespaceStatusCommand_Implements(t *testing.T) { @@ -107,7 +108,7 @@ func TestNamespaceStatusCommand_Run_Quota(t *testing.T) { // Create a quota qs := testQuotaSpec() _, err := client.Quotas().Register(qs, nil) - must.NoError(t, err) + assert.Nil(t, err) // Create a namespace ns := &api.Namespace{ @@ -115,7 +116,7 @@ func TestNamespaceStatusCommand_Run_Quota(t *testing.T) { Quota: qs.Name, } _, err = 
client.Namespaces().Register(ns, nil) - must.NoError(t, err) + assert.Nil(t, err) // Check status on namespace code := cmd.Run([]string{"-address=" + url, ns.Name}) diff --git a/command/node_drain_test.go b/command/node_drain_test.go index 4e5a1bafc1bf..34e767828df0 100644 --- a/command/node_drain_test.go +++ b/command/node_drain_test.go @@ -17,7 +17,8 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestNodeDrainCommand_Implements(t *testing.T) { @@ -27,7 +28,7 @@ func TestNodeDrainCommand_Implements(t *testing.T) { func TestNodeDrainCommand_Detach(t *testing.T) { ci.Parallel(t) - + require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_detach_node" }) @@ -71,7 +72,7 @@ func TestNodeDrainCommand_Detach(t *testing.T) { } _, _, err := client.Jobs().Register(job, nil) - must.NoError(t, err) + require.Nil(err) testutil.WaitForResult(func() (bool, error) { allocs, _, err := client.Nodes().Allocations(nodeID, nil) @@ -91,16 +92,16 @@ func TestNodeDrainCommand_Detach(t *testing.T) { out := ui.OutputWriter.String() expected := "drain strategy set" - must.StrContains(t, out, expected) + require.Contains(out, expected) node, _, err := client.Nodes().Info(nodeID, nil) - must.NoError(t, err) - must.NotNil(t, node.DrainStrategy) + require.Nil(err) + require.NotNil(node.DrainStrategy) } func TestNodeDrainCommand_Monitor(t *testing.T) { ci.Parallel(t) - + require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_monitor_node" }) @@ -160,7 +161,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { } _, _, err := client.Jobs().Register(job, nil) - must.NoError(t, err) + require.Nil(err) // Register a system job to ensure it is ignored during draining sysjob := &api.Job{ @@ -190,7 +191,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { } _, _, err = client.Jobs().Register(sysjob, nil) - must.NoError(t, err) + require.Nil(err) var allocs []*api.Allocation testutil.WaitForResult(func() (bool, error) { @@ -220,7 +221,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { cmd := &NodeDrainCommand{Meta: Meta{Ui: ui}} args := []string{"-address=" + url, "-self", "-enable", "-deadline", "1s", "-ignore-system"} t.Logf("Running: %v", args) - must.Zero(t, cmd.Run(args)) + require.Zero(cmd.Run(args)) out := outBuf.String() t.Logf("Output:\n%s", out) @@ -229,7 +230,7 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { // monitor goroutines may start only after some or all the allocs have been // migrated. 
if !testutil.IsTravis() { - must.StrContains(t, out, "Drain complete for node") + require.Contains(out, "Drain complete for node") for _, a := range allocs { if *a.Job.Type == "system" { if strings.Contains(out, a.ID) { @@ -237,8 +238,8 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { } continue } - must.StrContains(t, out, fmt.Sprintf("Alloc %q marked for migration", a.ID)) - must.StrContains(t, out, fmt.Sprintf("Alloc %q draining", a.ID)) + require.Contains(out, fmt.Sprintf("Alloc %q marked for migration", a.ID)) + require.Contains(out, fmt.Sprintf("Alloc %q draining", a.ID)) } expected := fmt.Sprintf("All allocations on node %q have stopped\n", nodeID) @@ -251,16 +252,16 @@ func TestNodeDrainCommand_Monitor(t *testing.T) { outBuf.Reset() args = []string{"-address=" + url, "-self", "-monitor", "-ignore-system"} t.Logf("Running: %v", args) - must.Zero(t, cmd.Run(args)) + require.Zero(cmd.Run(args)) out = outBuf.String() t.Logf("Output:\n%s", out) - must.StrContains(t, out, "No drain strategy set") + require.Contains(out, "No drain strategy set") } func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) { ci.Parallel(t) - + require := require.New(t) server, client, url := testServer(t, true, func(c *agent.Config) { c.NodeName = "drain_monitor_node2" }) @@ -297,7 +298,7 @@ func TestNodeDrainCommand_Monitor_NoDrainStrategy(t *testing.T) { out := outBuf.String() t.Logf("Output:\n%s", out) - must.StrContains(t, out, "No drain strategy set") + require.Contains(out, "No drain strategy set") } func TestNodeDrainCommand_Fails(t *testing.T) { @@ -416,6 +417,7 @@ func TestNodeDrainCommand_Fails(t *testing.T) { func TestNodeDrainCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -444,6 +446,6 @@ func TestNodeDrainCommand_AutocompleteArgs(t *testing.T) { predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, nodeID, res[0]) + assert.Equal(1, len(res)) + assert.Equal(nodeID, res[0]) } diff --git a/command/node_eligibility_test.go b/command/node_eligibility_test.go index 95de5dece25d..1aafb5d96c7a 100644 --- a/command/node_eligibility_test.go +++ b/command/node_eligibility_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNodeEligibilityCommand_Implements(t *testing.T) { @@ -95,6 +95,7 @@ func TestNodeEligibilityCommand_Fails(t *testing.T) { func TestNodeEligibilityCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -123,6 +124,6 @@ func TestNodeEligibilityCommand_AutocompleteArgs(t *testing.T) { predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, nodeID, res[0]) + assert.Equal(1, len(res)) + assert.Equal(nodeID, res[0]) } diff --git a/command/node_pool_jobs_test.go b/command/node_pool_jobs_test.go index a6dd93597d72..d16d0c9e57f6 100644 --- a/command/node_pool_jobs_test.go +++ b/command/node_pool_jobs_test.go @@ -8,13 +8,14 @@ import ( "strings" "testing" + "github.com/mitchellh/cli" + "github.com/shoenig/test" + "github.com/shoenig/test/must" + "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/pointer" - "github.com/mitchellh/cli" - 
"github.com/shoenig/test" - "github.com/shoenig/test/must" ) func TestNodePoolJobsListCommand_Run(t *testing.T) { diff --git a/command/node_status_test.go b/command/node_status_test.go index cfddd03f45dd..12dbb6058a77 100644 --- a/command/node_status_test.go +++ b/command/node_status_test.go @@ -15,7 +15,7 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestNodeStatusCommand_Implements(t *testing.T) { @@ -241,6 +241,7 @@ func TestNodeStatusCommand_Fails(t *testing.T) { func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -269,29 +270,30 @@ func TestNodeStatusCommand_AutocompleteArgs(t *testing.T) { predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, nodeID, res[0]) + assert.Equal(1, len(res)) + assert.Equal(nodeID, res[0]) } func TestNodeStatusCommand_FormatDrain(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) node := &api.Node{} - must.Eq(t, "false", formatDrain(node)) + assert.Equal("false", formatDrain(node)) node.DrainStrategy = &api.DrainStrategy{} - must.Eq(t, "true; no deadline", formatDrain(node)) + assert.Equal("true; no deadline", formatDrain(node)) node.DrainStrategy = &api.DrainStrategy{} node.DrainStrategy.Deadline = -1 * time.Second - must.Eq(t, "true; force drain", formatDrain(node)) + assert.Equal("true; force drain", formatDrain(node)) // formatTime special cases Unix(0, 0), so increment by 1 node.DrainStrategy = &api.DrainStrategy{} node.DrainStrategy.ForceDeadline = time.Unix(1, 0).UTC() - must.Eq(t, "true; 1970-01-01T00:00:01Z deadline", formatDrain(node)) + assert.Equal("true; 1970-01-01T00:00:01Z deadline", formatDrain(node)) node.DrainStrategy.IgnoreSystemJobs = true - must.Eq(t, "true; 1970-01-01T00:00:01Z deadline; ignoring system jobs", formatDrain(node)) + assert.Equal("true; 1970-01-01T00:00:01Z deadline; ignoring system jobs", formatDrain(node)) } diff --git a/command/operator_api_test.go b/command/operator_api_test.go index 5f30b2572b54..4d097b0d6877 100644 --- a/command/operator_api_test.go +++ b/command/operator_api_test.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) // TestOperatorAPICommand_Paths asserts that the op api command normalizes @@ -44,11 +45,11 @@ func TestOperatorAPICommand_Paths(t *testing.T) { // Assert that absolute paths are appended to the configured address exitCode := cmd.Run([]string{"-address=" + ts.URL, "/v1/jobs"}) - must.Zero(t, exitCode) + require.Zero(t, exitCode, buf.String()) select { case hit := <-hits: - must.Eq(t, expected, hit.String()) + require.Equal(t, expected, hit.String()) case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for hit") } @@ -58,11 +59,11 @@ func TestOperatorAPICommand_Paths(t *testing.T) { // Assert that full URLs are used as-is even if an invalid address is // set. 
exitCode = cmd.Run([]string{"-address=ftp://127.0.0.2:1", ts.URL + "/v1/jobs"}) - must.Zero(t, exitCode) + require.Zero(t, exitCode, buf.String()) select { case hit := <-hits: - must.Eq(t, expected, hit.String()) + require.Equal(t, expected, hit.String()) case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for hit") } @@ -72,11 +73,11 @@ // Assert that URLs lacking a scheme are used even if an invalid // address is set. exitCode = cmd.Run([]string{"-address=ftp://127.0.0.2:1", ts.Listener.Addr().String() + "/v1/jobs"}) - must.Zero(t, exitCode) + require.Zero(t, exitCode, buf.String()) select { case hit := <-hits: - must.Eq(t, expected, hit.String()) + require.Equal(t, expected, hit.String()) case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for hit") } @@ -104,7 +105,7 @@ func TestOperatorAPICommand_Curl(t *testing.T) { "-H", "Some-Other-Header: ok", "/url", }) - must.Zero(t, exitCode) + require.Zero(t, exitCode, buf.String()) expected := `curl \ -X POST \ @@ -112,7 +113,7 @@ -H 'X-Nomad-Token: acl-token' \ http://127.0.0.1:1/url?filter=this+%3D%3D+%22that%22+or+this+%21%3D+%22foo%22&region=not+even+a+valid+region ` - must.Eq(t, expected, buf.String()) + require.Equal(t, expected, buf.String()) } func Test_pathToURL(t *testing.T) { @@ -191,13 +192,13 @@ func TestOperatorAPICommand_ContentLength(t *testing.T) { // Setup a temp file to act as stdin. input := []byte("test-input") fakeStdin, err := os.CreateTemp("", "fake-stdin") - must.NoError(t, err) + require.NoError(t, err) defer os.Remove(fakeStdin.Name()) _, err = fakeStdin.Write(input) - must.NoError(t, err) + require.NoError(t, err) _, err = fakeStdin.Seek(0, 0) - must.NoError(t, err) + require.NoError(t, err) // Override the package's Stdin variable for testing. Stdin = fakeStdin @@ -213,11 +214,11 @@ // Assert that a request has the expected content length. exitCode := cmd.Run([]string{"-address=" + ts.URL, "/v1/jobs"}) - must.Zero(t, exitCode) + require.Zero(t, exitCode, buf.String()) select { case l := <-contentLength: - must.Eq(t, len(input), l) + require.Equal(t, len(input), l) case <-time.After(10 * time.Second): t.Fatalf("timed out waiting for request") } diff --git a/command/operator_autopilot_health.go b/command/operator_autopilot_health.go deleted file mode 100644 index 4b9e0c893de9..000000000000 --- a/command/operator_autopilot_health.go +++ /dev/null @@ -1,188 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/hashicorp/nomad/api" - "github.com/posener/complete" -) - -type OperatorAutopilotHealthCommand struct { - Meta -} - -func (c *OperatorAutopilotHealthCommand) AutocompleteFlags() complete.Flags { - return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient)) -} - -func (c *OperatorAutopilotHealthCommand) AutocompleteArgs() complete.Predictor { - return mergeAutocompleteFlags(c.Meta.AutocompleteFlags(FlagSetClient), - complete.Flags{ - "-json": complete.PredictNothing, - }) -} - -func (c *OperatorAutopilotHealthCommand) Name() string { return "operator autopilot health" } -func (c *OperatorAutopilotHealthCommand) Run(args []string) int { - var fJson bool - flags := c.Meta.FlagSet("autopilot", FlagSetClient) - flags.Usage = func() { c.Ui.Output(c.Help()) } - flags.BoolVar(&fJson, "json", false, "") - - if err := flags.Parse(args); err != nil { - c.Ui.Error(fmt.Sprintf("Failed to parse args: %v", err)) - return 1 - } - - // Set up a client. - client, err := c.Meta.Client() - if err != nil { - c.Ui.Error(fmt.Sprintf("Error initializing client: %s", err)) - return 1 - } - - // Fetch the current configuration. - state, _, err := client.Operator().AutopilotServerHealth(nil) - if err != nil { - c.Ui.Error(fmt.Sprintf("Error querying Autopilot configuration: %s", err)) - return 1 - } - if fJson { - bytes, err := json.Marshal(state) - if err != nil { - c.Ui.Error(fmt.Sprintf("failed to serialize client state: %v", err)) - return 1 - } - c.Ui.Output(string(bytes)) - } - - c.Ui.Output(formatAutopilotState(state)) - - return 0 -} - -func (c *OperatorAutopilotHealthCommand) Synopsis() string { - return "Display the current Autopilot health" -} - -func (c *OperatorAutopilotHealthCommand) Help() string { - helpText := ` -Usage: nomad operator autopilot health [options] - - Displays the current Autopilot state. - - If ACLs are enabled, this command requires a token with the 'operator:read' - capability. - -General Options: - -Output Options: - - -json - Output the autopilot health in JSON format. 
- - ` + generalOptionsUsage(usageOptsDefault|usageOptsNoNamespace) - - return strings.TrimSpace(helpText) -} - -func formatAutopilotState(state *api.OperatorHealthReply) string { - var out string - out = fmt.Sprintf("Healthy: %t\n", state.Healthy) - out = out + fmt.Sprintf("FailureTolerance: %d\n", state.FailureTolerance) - out = out + fmt.Sprintf("Leader: %s\n", state.Leader) - out = out + fmt.Sprintf("Voters: \n\t%s\n", renderServerIDList(state.Voters)) - out = out + fmt.Sprintf("Servers: \n%s\n", formatServerHealth(state.Servers)) - - out = formatCommandToEnt(out, state) - return out -} - -func formatVoters(voters []string) string { - out := make([]string, len(voters)) - for i, p := range voters { - out[i] = fmt.Sprintf("\t%s", p) - } - return formatList(out) -} - -func formatServerHealth(servers []api.ServerHealth) string { - out := make([]string, len(servers)+1) - out[0] = "ID|Name|Address|SerfStatus|Version|Leader|Voter|Healthy|LastContact|LastTerm|LastIndex|StableSince" - for i, p := range servers { - out[i+1] = fmt.Sprintf("%s|%s|%s|%s|%s|%t|%t|%t|%s|%d|%d|%s", - p.ID, - p.Name, - p.Address, - p.SerfStatus, - p.Version, - p.Leader, - p.Voter, - p.Healthy, - p.LastContact, - p.LastTerm, - p.LastIndex, - p.StableSince, - ) - } - return formatList(out) -} - -func renderServerIDList(ids []string) string { - rows := make([]string, len(ids)) - for i, id := range ids { - rows[i] = fmt.Sprintf("\t%s", id) - } - return formatList(rows) -} - -func formatCommandToEnt(out string, state *api.OperatorHealthReply) string { - if len(state.ReadReplicas) > 0 { - out = out + "\nReadReplicas:" - out = out + formatList(state.ReadReplicas) - } - - if len(state.RedundancyZones) > 0 { - out = out + "\nRedundancyZones:" - for _, zone := range state.RedundancyZones { - out = out + fmt.Sprintf(" %v", zone) - } - } - - if state.Upgrade != nil { - out = out + "Upgrade: \n" - out = out + fmt.Sprintf(" \tStatus: %v\n", state.Upgrade.Status) - out = out + fmt.Sprintf(" \tTargetVersion: %v\n", state.Upgrade.TargetVersion) - if len(state.Upgrade.TargetVersionVoters) > 0 { - out = out + fmt.Sprintf(" \tTargetVersionVoters: \n\t\t%s\n", renderServerIDList(state.Upgrade.TargetVersionVoters)) - } - if len(state.Upgrade.TargetVersionNonVoters) > 0 { - out = out + fmt.Sprintf(" \tTargetVersionNonVoters: \n\t\t%s\n", renderServerIDList(state.Upgrade.TargetVersionNonVoters)) - } - if len(state.Upgrade.TargetVersionReadReplicas) > 0 { - out = out + fmt.Sprintf(" \tTargetVersionReadReplicas: \n\t\t%s\n", renderServerIDList(state.Upgrade.TargetVersionReadReplicas)) - } - if len(state.Upgrade.OtherVersionVoters) > 0 { - out = out + fmt.Sprintf(" \tOtherVersionVoters: \n\t\t%s\n", renderServerIDList(state.Upgrade.OtherVersionVoters)) - } - if len(state.Upgrade.OtherVersionNonVoters) > 0 { - out = out + fmt.Sprintf(" \tOtherVersionNonVoters: \n\t\t%s\n", renderServerIDList(state.Upgrade.OtherVersionNonVoters)) - } - if len(state.Upgrade.OtherVersionReadReplicas) > 0 { - out = out + fmt.Sprintf(" \tOtherVersionReadReplicas: \n\t\t%s\n", renderServerIDList(state.Upgrade.OtherVersionReadReplicas)) - } - if len(state.Upgrade.RedundancyZones) > 0 { - - out = out + " \tRedundancyZones:\n" - for _, zone := range state.Upgrade.RedundancyZones { - out = out + fmt.Sprintf(" \t\t%v", zone) - } - } - } - return out -} diff --git a/command/operator_autopilot_health_test.go b/command/operator_autopilot_health_test.go deleted file mode 100644 index f42b3269dcd2..000000000000 --- a/command/operator_autopilot_health_test.go +++ /dev/null @@ 
-1,33 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package command - -import ( - "testing" - - "github.com/hashicorp/nomad/ci" - "github.com/mitchellh/cli" - "github.com/shoenig/test/must" -) - -func TestOperator_Autopilot_State_Implements(t *testing.T) { - ci.Parallel(t) - var _ cli.Command = &OperatorAutopilotHealthCommand{} -} - -func TestOperatorAutopilotStateCommand(t *testing.T) { - ci.Parallel(t) - s, _, addr := testServer(t, false, nil) - defer s.Shutdown() - - ui := cli.NewMockUi() - c := &OperatorAutopilotHealthCommand{Meta: Meta{Ui: ui}} - args := []string{"-address=" + addr} - - code := c.Run(args) - must.Eq(t, 0, code, must.Sprintf("got error for exit code: %v", ui.ErrorWriter.String())) - - out := ui.OutputWriter.String() - must.StrContains(t, out, "Healthy") -} diff --git a/command/operator_autopilot_set_test.go b/command/operator_autopilot_set_test.go index abd04a1ce1f0..93aaa7823561 100644 --- a/command/operator_autopilot_set_test.go +++ b/command/operator_autopilot_set_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperator_Autopilot_SetConfig_Implements(t *testing.T) { @@ -20,7 +20,7 @@ func TestOperator_Autopilot_SetConfig_Implements(t *testing.T) { func TestOperatorAutopilotSetConfigCommand(t *testing.T) { ci.Parallel(t) - + require := require.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -39,23 +39,22 @@ func TestOperatorAutopilotSetConfigCommand(t *testing.T) { } code := c.Run(args) - must.Zero(t, code) - + require.EqualValues(0, code) output := strings.TrimSpace(ui.OutputWriter.String()) - must.StrContains(t, output, "Configuration updated") + require.Contains(output, "Configuration updated") client, err := c.Client() - must.NoError(t, err) + require.NoError(err) conf, _, err := client.Operator().AutopilotGetConfiguration(nil) - must.NoError(t, err) - - must.False(t, conf.CleanupDeadServers) - must.Eq(t, 99, conf.MaxTrailingLogs) - must.Eq(t, 3, conf.MinQuorum) - must.Eq(t, 123*time.Millisecond, conf.LastContactThreshold) - must.Eq(t, 123*time.Millisecond, conf.ServerStabilizationTime) - must.True(t, conf.EnableRedundancyZones) - must.True(t, conf.DisableUpgradeMigration) - must.True(t, conf.EnableCustomUpgrades) + require.NoError(err) + + require.False(conf.CleanupDeadServers) + require.EqualValues(99, conf.MaxTrailingLogs) + require.EqualValues(3, conf.MinQuorum) + require.EqualValues(123*time.Millisecond, conf.LastContactThreshold) + require.EqualValues(123*time.Millisecond, conf.ServerStabilizationTime) + require.True(conf.EnableRedundancyZones) + require.True(conf.DisableUpgradeMigration) + require.True(conf.EnableCustomUpgrades) } diff --git a/command/operator_debug_test.go b/command/operator_debug_test.go index dbe3720e6a22..9e811faadca9 100644 --- a/command/operator_debug_test.go +++ b/command/operator_debug_test.go @@ -31,7 +31,6 @@ import ( ) // NOTE: most of these tests cannot be run in parallel -// TODO(shoenig): come back to this one type testCase struct { name string diff --git a/command/operator_gossip_keyring_test.go b/command/operator_gossip_keyring_test.go index 748fef912c2d..73bd90eeeaa2 100644 --- a/command/operator_gossip_keyring_test.go +++ b/command/operator_gossip_keyring_test.go @@ -9,7 +9,6 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" ) func TestGossipKeyringGenerateCommand(t *testing.T) { @@ 
-18,11 +17,17 @@ func TestGossipKeyringGenerateCommand(t *testing.T) { ui := cli.NewMockUi() c := &OperatorGossipKeyringGenerateCommand{Meta: Meta{Ui: ui}} code := c.Run(nil) - must.Zero(t, code) + if code != 0 { + t.Fatalf("bad: %d", code) + } output := ui.OutputWriter.String() result, err := base64.StdEncoding.DecodeString(output) - must.NoError(t, err) - must.Len(t, 32, result) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(result) != 32 { + t.Fatalf("bad: %#v", result) + } } diff --git a/command/operator_raft_remove_test.go b/command/operator_raft_remove_test.go index 5ac287e055a2..4ace77416209 100644 --- a/command/operator_raft_remove_test.go +++ b/command/operator_raft_remove_test.go @@ -8,7 +8,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestOperator_Raft_RemovePeers_Implements(t *testing.T) { @@ -18,7 +18,7 @@ func TestOperator_Raft_RemovePeers_Implements(t *testing.T) { func TestOperator_Raft_RemovePeer(t *testing.T) { ci.Parallel(t) - + assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -32,18 +32,21 @@ func TestOperator_Raft_RemovePeer(t *testing.T) { t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) } - must.StrContains(t, ui.ErrorWriter.String(), "cannot give both an address and id") + assert.Contains(ui.ErrorWriter.String(), "cannot give both an address and id") // Neither address nor ID present args = args[:1] code = c.Run(args) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "an address or id is required for the peer to remove") + if code != 1 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } + + assert.Contains(ui.ErrorWriter.String(), "an address or id is required for the peer to remove") } func TestOperator_Raft_RemovePeerAddress(t *testing.T) { ci.Parallel(t) - + assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -52,15 +55,17 @@ func TestOperator_Raft_RemovePeerAddress(t *testing.T) { args := []string{"-address=" + addr, "-peer-address=nope"} code := c.Run(args) - must.One(t, code) + if code != 1 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } // If we get this error, it proves we sent the address all the way through. - must.StrContains(t, ui.ErrorWriter.String(), "address \"nope\" was not found in the Raft configuration") + assert.Contains(ui.ErrorWriter.String(), "address \"nope\" was not found in the Raft configuration") } func TestOperator_Raft_RemovePeerID(t *testing.T) { ci.Parallel(t) - + assert := assert.New(t) s, _, addr := testServer(t, false, nil) defer s.Shutdown() @@ -69,8 +74,10 @@ func TestOperator_Raft_RemovePeerID(t *testing.T) { args := []string{"-address=" + addr, "-peer-id=nope"} code := c.Run(args) - must.One(t, code) + if code != 1 { + t.Fatalf("bad: %d. %#v", code, ui.ErrorWriter.String()) + } // If we get this error, it proves we sent the address all the way through. 
- must.StrContains(t, ui.ErrorWriter.String(), "id \"nope\" was not found in the Raft configuration") + assert.Contains(ui.ErrorWriter.String(), "id \"nope\" was not found in the Raft configuration") } diff --git a/command/operator_scheduler_get_config_test.go b/command/operator_scheduler_get_config_test.go index f1c73d75361d..32b35da06639 100644 --- a/command/operator_scheduler_get_config_test.go +++ b/command/operator_scheduler_get_config_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperatorSchedulerGetConfig_Run(t *testing.T) { @@ -23,28 +23,28 @@ func TestOperatorSchedulerGetConfig_Run(t *testing.T) { c := &OperatorSchedulerGetConfig{Meta: Meta{Ui: ui}} // Run the command, so we get the default output and test this. - must.Zero(t, c.Run([]string{"-address=" + addr})) + require.EqualValues(t, 0, c.Run([]string{"-address=" + addr})) s := ui.OutputWriter.String() - must.StrContains(t, s, "Scheduler Algorithm = binpack") - must.StrContains(t, s, "Preemption SysBatch Scheduler = false") + require.Contains(t, s, "Scheduler Algorithm = binpack") + require.Contains(t, s, "Preemption SysBatch Scheduler = false") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Request JSON output and test. - must.Zero(t, c.Run([]string{"-address=" + addr, "-json"})) + require.EqualValues(t, 0, c.Run([]string{"-address=" + addr, "-json"})) s = ui.OutputWriter.String() var js api.SchedulerConfiguration - must.NoError(t, json.Unmarshal([]byte(s), &js)) + require.NoError(t, json.Unmarshal([]byte(s), &js)) ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Request a template output and test. - must.Zero(t, c.Run([]string{"-address=" + addr, "-t='{{printf \"%s!!!\" .SchedulerConfig.SchedulerAlgorithm}}'"})) - must.StrContains(t, ui.OutputWriter.String(), "binpack!!!") + require.EqualValues(t, 0, c.Run([]string{"-address=" + addr, "-t='{{printf \"%s!!!\" .SchedulerConfig.SchedulerAlgorithm}}'"})) + require.Contains(t, ui.OutputWriter.String(), "binpack!!!") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Test an unsupported flag. - must.One(t, c.Run([]string{"-address=" + addr, "-yaml"})) - must.StrContains(t, ui.OutputWriter.String(), "Usage: nomad operator scheduler get-config") + require.EqualValues(t, 1, c.Run([]string{"-address=" + addr, "-yaml"})) + require.Contains(t, ui.OutputWriter.String(), "Usage: nomad operator scheduler get-config") } diff --git a/command/operator_scheduler_set_config_test.go b/command/operator_scheduler_set_config_test.go index 533b4f71f727..4ea0ace79471 100644 --- a/command/operator_scheduler_set_config_test.go +++ b/command/operator_scheduler_set_config_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperatorSchedulerSetConfig_Run(t *testing.T) { @@ -23,19 +23,19 @@ func TestOperatorSchedulerSetConfig_Run(t *testing.T) { c := &OperatorSchedulerSetConfig{Meta: Meta{Ui: ui}} bootstrappedConfig, _, err := srv.APIClient().Operator().SchedulerGetConfiguration(nil) - must.NoError(t, err) - must.NotNil(t, bootstrappedConfig.SchedulerConfig) + require.NoError(t, err) + require.NotEmpty(t, bootstrappedConfig.SchedulerConfig) // Run the command with zero value and ensure the configuration does not // change. 
- must.Zero(t, c.Run([]string{"-address=" + addr})) + require.EqualValues(t, 0, c.Run([]string{"-address=" + addr})) ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Read the configuration again and test that nothing has changed which // ensures our empty flags are working correctly. nonModifiedConfig, _, err := srv.APIClient().Operator().SchedulerGetConfiguration(nil) - must.NoError(t, err) + require.NoError(t, err) schedulerConfigEquals(t, bootstrappedConfig.SchedulerConfig, nonModifiedConfig.SchedulerConfig) // Modify every configuration parameter using the flags. This ensures the @@ -52,12 +52,12 @@ func TestOperatorSchedulerSetConfig_Run(t *testing.T) { "-preempt-sysbatch-scheduler=true", "-preempt-system-scheduler=false", } - must.Zero(t, c.Run(modifyingArgs)) + require.EqualValues(t, 0, c.Run(modifyingArgs)) s := ui.OutputWriter.String() - must.StrContains(t, s, "Scheduler configuration updated!") + require.Contains(t, s, "Scheduler configuration updated!") modifiedConfig, _, err := srv.APIClient().Operator().SchedulerGetConfiguration(nil) - must.NoError(t, err) + require.NoError(t, err) schedulerConfigEquals(t, &api.SchedulerConfiguration{ SchedulerAlgorithm: "spread", PreemptionConfig: api.PreemptionConfig{ @@ -76,36 +76,36 @@ func TestOperatorSchedulerSetConfig_Run(t *testing.T) { // Make a Freudian slip with one of the flags to ensure the usage is // returned. - must.One(t, c.Run([]string{"-address=" + addr, "-pause-evil-broker=true"})) - must.StrContains(t, ui.OutputWriter.String(), "Usage: nomad operator scheduler set-config") + require.EqualValues(t, 1, c.Run([]string{"-address=" + addr, "-pause-evil-broker=true"})) + require.Contains(t, ui.OutputWriter.String(), "Usage: nomad operator scheduler set-config") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Try updating the config using an incorrect check-index value. - must.One(t, c.Run([]string{ + require.EqualValues(t, 1, c.Run([]string{ "-address=" + addr, "-pause-eval-broker=false", "-check-index=1000000", })) - must.StrContains(t, ui.ErrorWriter.String(), "check-index 1000000 does not match does not match current state") + require.Contains(t, ui.ErrorWriter.String(), "check-index 1000000 does not match does not match current state") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() // Try updating the config using a correct check-index value. 
- must.Zero(t, c.Run([]string{ + require.EqualValues(t, 0, c.Run([]string{ "-address=" + addr, "-pause-eval-broker=false", "-check-index=" + strconv.FormatUint(modifiedConfig.SchedulerConfig.ModifyIndex, 10), })) - must.StrContains(t, ui.OutputWriter.String(), "Scheduler configuration updated!") + require.Contains(t, ui.OutputWriter.String(), "Scheduler configuration updated!") ui.ErrorWriter.Reset() ui.OutputWriter.Reset() } func schedulerConfigEquals(t *testing.T, expected, actual *api.SchedulerConfiguration) { - must.Eq(t, expected.SchedulerAlgorithm, actual.SchedulerAlgorithm) - must.Eq(t, expected.RejectJobRegistration, actual.RejectJobRegistration) - must.Eq(t, expected.MemoryOversubscriptionEnabled, actual.MemoryOversubscriptionEnabled) - must.Eq(t, expected.PauseEvalBroker, actual.PauseEvalBroker) - must.Eq(t, expected.PreemptionConfig, actual.PreemptionConfig) + require.Equal(t, expected.SchedulerAlgorithm, actual.SchedulerAlgorithm) + require.Equal(t, expected.RejectJobRegistration, actual.RejectJobRegistration) + require.Equal(t, expected.MemoryOversubscriptionEnabled, actual.MemoryOversubscriptionEnabled) + require.Equal(t, expected.PauseEvalBroker, actual.PauseEvalBroker) + require.Equal(t, expected.PreemptionConfig, actual.PreemptionConfig) } diff --git a/command/operator_snapshot_inspect_test.go b/command/operator_snapshot_inspect_test.go index c6dbb8622051..3dd480fa4244 100644 --- a/command/operator_snapshot_inspect_test.go +++ b/command/operator_snapshot_inspect_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperatorSnapshotInspect_Works(t *testing.T) { @@ -24,7 +24,7 @@ func TestOperatorSnapshotInspect_Works(t *testing.T) { cmd := &OperatorSnapshotInspectCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{snapPath}) - must.Zero(t, code) + require.Zero(t, code) output := ui.OutputWriter.String() for _, key := range []string{ @@ -34,7 +34,7 @@ func TestOperatorSnapshotInspect_Works(t *testing.T) { "Term", "Version", } { - must.StrContains(t, output, key) + require.Contains(t, output, key) } } @@ -47,15 +47,15 @@ func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) { filepath.Join(tmpDir, "invalid.snap"), []byte("invalid data"), 0600) - must.NoError(t, err) + require.NoError(t, err) t.Run("not found", func(t *testing.T) { ui := cli.NewMockUi() cmd := &OperatorSnapshotInspectCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{filepath.Join(tmpDir, "foo")}) - must.Positive(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "no such file") + require.NotZero(t, code) + require.Contains(t, ui.ErrorWriter.String(), "no such file") }) t.Run("invalid file", func(t *testing.T) { @@ -63,8 +63,8 @@ func TestOperatorSnapshotInspect_HandlesFailure(t *testing.T) { cmd := &OperatorSnapshotInspectCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{filepath.Join(tmpDir, "invalid.snap")}) - must.Positive(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "Error verifying snapshot") + require.NotZero(t, code) + require.Contains(t, ui.ErrorWriter.String(), "Error verifying snapshot") }) } @@ -94,7 +94,7 @@ func generateSnapshotFile(t *testing.T, prepare func(srv *agent.TestAgent, clien "--address=" + url, dest, }) - must.Zero(t, code) + require.Zero(t, code) return dest } diff --git a/command/operator_snapshot_restore_test.go b/command/operator_snapshot_restore_test.go index 434d788bc247..8f81b3c4c330 
100644 --- a/command/operator_snapshot_restore_test.go +++ b/command/operator_snapshot_restore_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperatorSnapshotRestore_Works(t *testing.T) { @@ -44,7 +44,7 @@ job "snapshot-test-job" { cmd.JobGetter.testStdin = strings.NewReader(sampleJob) code := cmd.Run([]string{"--address=" + url, "-detach", "-"}) - must.Zero(t, code) + require.Zero(t, code) }) srv, _, url := testServer(t, false, func(c *agent.Config) { @@ -60,20 +60,20 @@ job "snapshot-test-job" { // job is not found before restore j, err := srv.Agent.Server().State().JobByID(nil, structs.DefaultNamespace, "snapshot-test-job") - must.NoError(t, err) - must.Nil(t, j) + require.NoError(t, err) + require.Nil(t, j) ui := cli.NewMockUi() cmd := &OperatorSnapshotRestoreCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"--address=" + url, snapshotPath}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Zero(t, code) - must.StrContains(t, ui.OutputWriter.String(), "Snapshot Restored") + require.Empty(t, ui.ErrorWriter.String()) + require.Zero(t, code) + require.Contains(t, ui.OutputWriter.String(), "Snapshot Restored") foundJob, err := srv.Agent.Server().State().JobByID(nil, structs.DefaultNamespace, "snapshot-test-job") - must.NoError(t, err) - must.Eq(t, "snapshot-test-job", foundJob.ID) + require.NoError(t, err) + require.Equal(t, "snapshot-test-job", foundJob.ID) } func TestOperatorSnapshotRestore_Fails(t *testing.T) { @@ -84,12 +84,12 @@ func TestOperatorSnapshotRestore_Fails(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) ui.ErrorWriter.Reset() // Fails when specified file does not exist code = cmd.Run([]string{"/unicorns/leprechauns"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "no such file") + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), "no such file") } diff --git a/command/operator_snapshot_save_test.go b/command/operator_snapshot_save_test.go index 55b5e569171d..2473dc583fb8 100644 --- a/command/operator_snapshot_save_test.go +++ b/command/operator_snapshot_save_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/snapshot" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestOperatorSnapshotSave_Works(t *testing.T) { @@ -39,15 +39,15 @@ func TestOperatorSnapshotSave_Works(t *testing.T) { "--address=" + url, dest, }) - must.Zero(t, code) - must.StrContains(t, ui.OutputWriter.String(), "State file written to "+dest) + require.Zero(t, code) + require.Contains(t, ui.OutputWriter.String(), "State file written to "+dest) f, err := os.Open(dest) - must.NoError(t, err) + require.NoError(t, err) meta, err := snapshot.Verify(f) - must.NoError(t, err) - must.Positive(t, meta.Index) + require.NoError(t, err) + require.NotZero(t, meta.Index) } func TestOperatorSnapshotSave_Fails(t *testing.T) { @@ -58,12 +58,12 @@ func TestOperatorSnapshotSave_Fails(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) + 
require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) ui.ErrorWriter.Reset() // Fails when specified file does not exist code = cmd.Run([]string{"/unicorns/leprechauns"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "no such file") + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), "no such file") } diff --git a/command/plugin_status_test.go b/command/plugin_status_test.go index 85c3b2a3c21f..e62306c726c5 100644 --- a/command/plugin_status_test.go +++ b/command/plugin_status_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/nomad/state" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestPluginStatusCommand_Implements(t *testing.T) { @@ -26,18 +26,18 @@ func TestPluginStatusCommand_Fails(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) + require.Equal(t, 1, code) out := ui.ErrorWriter.String() - must.StrContains(t, out, commandErrorText(cmd)) + require.Contains(t, out, commandErrorText(cmd)) ui.ErrorWriter.Reset() // Test an unsupported plugin type. code = cmd.Run([]string{"-type=not-a-plugin"}) - must.One(t, code) + require.Equal(t, 1, code) out = ui.ErrorWriter.String() - must.StrContains(t, out, "Unsupported plugin type: not-a-plugin") + require.Contains(t, out, "Unsupported plugin type: not-a-plugin") ui.ErrorWriter.Reset() } @@ -57,13 +57,13 @@ func TestPluginStatusCommand_AutocompleteArgs(t *testing.T) { defer cleanup() ws := memdb.NewWatchSet() plug, err := s.CSIPluginByID(ws, id) - must.NoError(t, err) + require.NoError(t, err) prefix := plug.ID[:len(plug.ID)-5] args := complete.Args{Last: prefix} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, plug.ID, res[0]) + require.Equal(t, 1, len(res)) + require.Equal(t, plug.ID, res[0]) } diff --git a/command/quota_delete_test.go b/command/quota_delete_test.go index c084853fd2aa..24e7ca98de95 100644 --- a/command/quota_delete_test.go +++ b/command/quota_delete_test.go @@ -16,7 +16,7 @@ import ( "github.com/hashicorp/nomad/helper/uuid" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestQuotaDeleteCommand_Implements(t *testing.T) { @@ -60,7 +60,7 @@ func TestQuotaDeleteCommand_Good(t *testing.T) { // Create a quota to delete qs := testQuotaSpec() _, err := client.Quotas().Register(qs, nil) - must.NoError(t, err) + assert.Nil(t, err) // Delete the quota if code := cmd.Run([]string{"-address=" + url, qs.Name}); code != 0 { @@ -68,12 +68,13 @@ func TestQuotaDeleteCommand_Good(t *testing.T) { } quotas, _, err := client.Quotas().List(nil) - must.NoError(t, err) - must.SliceEmpty(t, quotas) + assert.Nil(t, err) + assert.Len(t, quotas, 0) } func TestQuotaDeleteCommand_AutocompleteArgs(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -84,14 +85,14 @@ func TestQuotaDeleteCommand_AutocompleteArgs(t *testing.T) { // Create a quota qs := testQuotaSpec() _, err := client.Quotas().Register(qs, nil) - must.NoError(t, err) + assert.Nil(err) args := complete.Args{Last: "quot"} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, qs.Name, res[0]) + assert.Equal(1, len(res)) + assert.Equal(qs.Name, res[0]) } // testQuotaSpec returns a test quota 
specification diff --git a/command/quota_init_test.go b/command/quota_init_test.go index 3a6d3869f4dc..95c995e06d1b 100644 --- a/command/quota_init_test.go +++ b/command/quota_init_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestQuotaInitCommand_Implements(t *testing.T) { @@ -24,44 +24,44 @@ func TestQuotaInitCommand_Run_HCL(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) ui.ErrorWriter.Reset() // Ensure we change the cwd back origDir, err := os.Getwd() - must.NoError(t, err) + require.NoError(t, err) defer os.Chdir(origDir) // Create a temp dir and change into it dir := t.TempDir() err = os.Chdir(dir) - must.NoError(t, err) + require.NoError(t, err) // Works if the file doesn't exist code = cmd.Run([]string{}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Zero(t, code) + require.Empty(t, ui.ErrorWriter.String()) + require.Zero(t, code) content, err := os.ReadFile(DefaultHclQuotaInitName) - must.NoError(t, err) - must.Eq(t, defaultHclQuotaSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultHclQuotaSpec, string(content)) // Fails if the file exists code = cmd.Run([]string{}) - must.StrContains(t, ui.ErrorWriter.String(), "exists") - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "exists") + require.Equal(t, 1, code) ui.ErrorWriter.Reset() // Works if file is passed code = cmd.Run([]string{"mytest.hcl"}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Zero(t, code) + require.Empty(t, ui.ErrorWriter.String()) + require.Zero(t, code) content, err = os.ReadFile("mytest.hcl") - must.NoError(t, err) - must.Eq(t, defaultHclQuotaSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultHclQuotaSpec, string(content)) } func TestQuotaInitCommand_Run_JSON(t *testing.T) { @@ -71,42 +71,42 @@ func TestQuotaInitCommand_Run_JSON(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) ui.ErrorWriter.Reset() // Ensure we change the cwd back origDir, err := os.Getwd() - must.NoError(t, err) + require.NoError(t, err) defer os.Chdir(origDir) // Create a temp dir and change into it dir := t.TempDir() err = os.Chdir(dir) - must.NoError(t, err) + require.NoError(t, err) // Works if the file doesn't exist code = cmd.Run([]string{"-json"}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Zero(t, code) + require.Empty(t, ui.ErrorWriter.String()) + require.Zero(t, code) content, err := os.ReadFile(DefaultJsonQuotaInitName) - must.NoError(t, err) - must.Eq(t, defaultJsonQuotaSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultJsonQuotaSpec, string(content)) // Fails if the file exists code = cmd.Run([]string{"-json"}) - must.StrContains(t, ui.ErrorWriter.String(), "exists") - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "exists") + require.Equal(t, 1, code) ui.ErrorWriter.Reset() // Works if file is passed code = cmd.Run([]string{"-json", "mytest.json"}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Zero(t, code) + require.Empty(t, 
ui.ErrorWriter.String()) + require.Zero(t, code) content, err = os.ReadFile("mytest.json") - must.NoError(t, err) - must.Eq(t, defaultJsonQuotaSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultJsonQuotaSpec, string(content)) } diff --git a/command/quota_list_test.go b/command/quota_list_test.go index 729febf9d6d2..19243bc41faa 100644 --- a/command/quota_list_test.go +++ b/command/quota_list_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func TestQuotaListCommand_Implements(t *testing.T) { @@ -45,6 +45,7 @@ func TestQuotaListCommand_Fails(t *testing.T) { func TestQuotaListCommand_List(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) // Create a server srv, client, url := testServer(t, true, nil) @@ -56,7 +57,7 @@ func TestQuotaListCommand_List(t *testing.T) { // Create a quota qs := testQuotaSpec() _, err := client.Quotas().Register(qs, nil) - must.NoError(t, err) + assert.Nil(err) // List should contain the new quota if code := cmd.Run([]string{"-address=" + url}); code != 0 { diff --git a/command/recommendation_apply_test.go b/command/recommendation_apply_test.go index 360c1e8eb518..259ddbad85c4 100644 --- a/command/recommendation_apply_test.go +++ b/command/recommendation_apply_test.go @@ -11,12 +11,12 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestRecommendationApplyCommand_Run(t *testing.T) { ci.Parallel(t) - + require := require.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -47,9 +47,9 @@ func TestRecommendationApplyCommand_Run(t *testing.T) { // Register a test job to write a recommendation against. testJob := testJob("recommendation_apply") regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(0, registerCode) // Write a recommendation. rec := api.Recommendation{ @@ -63,15 +63,15 @@ func TestRecommendationApplyCommand_Run(t *testing.T) { } recResp, _, err := client.Recommendations().Upsert(&rec, nil) if srv.Enterprise { - must.NoError(t, err) + require.NoError(err) // Read the recommendation out to ensure it is there as a control on // later tests. recInfo, _, err := client.Recommendations().Info(recResp.ID, nil) - must.NoError(t, err) - must.NotNil(t, recInfo) + require.NoError(err) + require.NotNil(recInfo) } else { - must.ErrorContains(t, err, "Nomad Enterprise only endpoint") + require.Error(err, "Nomad Enterprise only endpoint") } // Only perform the call if we are running enterprise tests. Otherwise the @@ -80,18 +80,18 @@ func TestRecommendationApplyCommand_Run(t *testing.T) { return } code := cmd.Run([]string{"-address=" + url, recResp.ID}) - must.Zero(t, code) + require.Equal(0, code) // Perform an info call on the recommendation which should return not // found. recInfo, _, err := client.Recommendations().Info(recResp.ID, nil) - must.ErrorContains(t, err, "not found") - must.Nil(t, recInfo) + require.Error(err, "not found") + require.Nil(recInfo) // Check the new jobspec to see if the resource value has changed. 
jobResp, _, err := client.Jobs().Info(*testJob.ID, nil) - must.NoError(t, err) - must.Eq(t, 1, *jobResp.TaskGroups[0].Tasks[0].Resources.CPU) + require.NoError(err) + require.Equal(1, *jobResp.TaskGroups[0].Tasks[0].Resources.CPU) } func TestRecommendationApplyCommand_AutocompleteArgs(t *testing.T) { diff --git a/command/recommendation_dismiss_test.go b/command/recommendation_dismiss_test.go index 86e8877ebd68..9393ddfafb36 100644 --- a/command/recommendation_dismiss_test.go +++ b/command/recommendation_dismiss_test.go @@ -7,18 +7,19 @@ import ( "fmt" "testing" - "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/command/agent" - "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/command/agent" + "github.com/hashicorp/nomad/testutil" ) func TestRecommendationDismissCommand_Run(t *testing.T) { ci.Parallel(t) - + require := require.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -50,9 +51,9 @@ func TestRecommendationDismissCommand_Run(t *testing.T) { // Register a test job to write a recommendation against. testJob := testJob("recommendation_dismiss") regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(0, registerCode) // Write a recommendation. rec := api.Recommendation{ @@ -66,15 +67,15 @@ func TestRecommendationDismissCommand_Run(t *testing.T) { } recResp, _, err := client.Recommendations().Upsert(&rec, nil) if srv.Enterprise { - must.NoError(t, err) + require.NoError(err) // Read the recommendation out to ensure it is there as a control on // later tests. recInfo, _, err := client.Recommendations().Info(recResp.ID, nil) - must.NoError(t, err) - must.NotNil(t, recInfo) + require.NoError(err) + require.NotNil(recInfo) } else { - must.ErrorContains(t, err, "Nomad Enterprise only endpoint") + require.Error(err, "Nomad Enterprise only endpoint") } // Only perform the call if we are running enterprise tests. Otherwise the @@ -83,15 +84,15 @@ func TestRecommendationDismissCommand_Run(t *testing.T) { return } code := cmd.Run([]string{"-address=" + url, recResp.ID}) - must.Zero(t, code) + require.Equal(0, code) out := ui.OutputWriter.String() - must.StrContains(t, out, "Successfully dismissed recommendation") + require.Contains(out, "Successfully dismissed recommendation") // Perform an info call on the recommendation which should return not // found. recInfo, _, err := client.Recommendations().Info(recResp.ID, nil) - must.ErrorContains(t, err, "not found") - must.Nil(t, recInfo) + require.Error(err, "not found") + require.Nil(recInfo) } func TestRecommendationDismissCommand_AutocompleteArgs(t *testing.T) { @@ -113,10 +114,12 @@ func TestRecommendationDismissCommand_AutocompleteArgs(t *testing.T) { } func testRecommendationAutocompleteCommand(t *testing.T, client *api.Client, srv *agent.TestAgent, cmd *RecommendationAutocompleteCommand) { + require := require.New(t) + // Register a test job to write a recommendation against. testJob := testJob("recommendation_autocomplete") _, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(err) // Write a recommendation. 
rec := &api.Recommendation{ @@ -130,9 +133,9 @@ func testRecommendationAutocompleteCommand(t *testing.T, client *api.Client, srv } rec, _, err = client.Recommendations().Upsert(rec, nil) if srv.Enterprise { - must.NoError(t, err) + require.NoError(err) } else { - must.ErrorContains(t, err, "Nomad Enterprise only endpoint") + require.Error(err, "Nomad Enterprise only endpoint") return } @@ -141,6 +144,6 @@ func testRecommendationAutocompleteCommand(t *testing.T, client *api.Client, srv predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, rec.ID, res[0]) + require.Equal(1, len(res)) + require.Equal(rec.ID, res[0]) } diff --git a/command/recommendation_info_test.go b/command/recommendation_info_test.go index 757842c9bc1c..76dc2d2fe9c1 100644 --- a/command/recommendation_info_test.go +++ b/command/recommendation_info_test.go @@ -7,16 +7,17 @@ import ( "fmt" "testing" - "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" + + "github.com/hashicorp/nomad/api" + "github.com/hashicorp/nomad/testutil" ) func TestRecommendationInfoCommand_Run(t *testing.T) { ci.Parallel(t) - + require := require.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() testutil.WaitForResult(func() (bool, error) { @@ -45,20 +46,20 @@ func TestRecommendationInfoCommand_Run(t *testing.T) { // Perform an initial call, which should return a not found error. code := cmd.Run([]string{"-address=" + url, "2c13f001-f5b6-ce36-03a5-e37afe160df5"}) if srv.Enterprise { - must.One(t, code) + require.Equal(1, code) out := ui.ErrorWriter.String() - must.StrContains(t, out, "Recommendation not found") + require.Contains(out, "Recommendation not found") } else { - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") + require.Equal(1, code) + require.Contains(ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") } // Register a test job to write a recommendation against. testJob := testJob("recommendation_info") regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(0, registerCode) // Write a recommendation. rec := api.Recommendation{ @@ -72,21 +73,21 @@ func TestRecommendationInfoCommand_Run(t *testing.T) { } recResp, _, err := client.Recommendations().Upsert(&rec, nil) if srv.Enterprise { - must.NoError(t, err) + require.NoError(err) } else { - must.ErrorContains(t, err, "Nomad Enterprise only endpoint") + require.Error(err, "Nomad Enterprise only endpoint") } // Only perform the call if we are running enterprise tests. Otherwise the // recResp object will be nil. 
if srv.Enterprise { code = cmd.Run([]string{"-address=" + url, recResp.ID}) - must.Zero(t, code) + require.Equal(0, code) out := ui.OutputWriter.String() - must.StrContains(t, out, "test-meta-entry") - must.StrContains(t, out, "p13") - must.StrContains(t, out, "1.13") - must.StrContains(t, out, recResp.ID) + require.Contains(out, "test-meta-entry") + require.Contains(out, "p13") + require.Contains(out, "1.13") + require.Contains(out, recResp.ID) } } diff --git a/command/recommendation_list_test.go b/command/recommendation_list_test.go index 9701b7e0e3e2..1083b3848ac8 100644 --- a/command/recommendation_list_test.go +++ b/command/recommendation_list_test.go @@ -11,12 +11,13 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestRecommendationListCommand_Run(t *testing.T) { ci.Parallel(t) - + require := require.New(t) srv, client, url := testServer(t, true, nil) defer srv.Shutdown() @@ -30,20 +31,20 @@ func TestRecommendationListCommand_Run(t *testing.T) { // Perform an initial list, which should return zero results. code := cmd.Run([]string{"-address=" + url}) if srv.Enterprise { - must.Zero(t, code) + require.Equal(0, code) out := ui.OutputWriter.String() - must.StrContains(t, out, "No recommendations found") + require.Contains(out, "No recommendations found") } else { - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") + require.Equal(1, code) + require.Contains(ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") } // Register a test job to write a recommendation against. testJob := testJob("recommendation_list") regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(0, registerCode) // Write a recommendation. rec := api.Recommendation{ @@ -57,26 +58,26 @@ func TestRecommendationListCommand_Run(t *testing.T) { } _, _, err = client.Recommendations().Upsert(&rec, nil) if srv.Enterprise { - must.NoError(t, err) + require.NoError(err) } else { - must.ErrorContains(t, err, "Nomad Enterprise only endpoint") + require.Error(err, "Nomad Enterprise only endpoint") } // Perform a new list which should yield results. 
code = cmd.Run([]string{"-address=" + url}) if srv.Enterprise { - must.Zero(t, code) + require.Equal(0, code) out := ui.OutputWriter.String() - must.StrContains(t, out, "ID") - must.StrContains(t, out, "Job") - must.StrContains(t, out, "Group") - must.StrContains(t, out, "Task") - must.StrContains(t, out, "Resource") - must.StrContains(t, out, "Value") - must.StrContains(t, out, "CPU") + require.Contains(out, "ID") + require.Contains(out, "Job") + require.Contains(out, "Group") + require.Contains(out, "Task") + require.Contains(out, "Resource") + require.Contains(out, "Value") + require.Contains(out, "CPU") } else { - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") + require.Equal(1, code) + require.Contains(ui.ErrorWriter.String(), "Nomad Enterprise only endpoint") } } @@ -162,7 +163,7 @@ func TestRecommendationListCommand_Sort(t *testing.T) { t.Run(tc.name, func(t *testing.T) { sortedRecs := recommendationList{r: tc.inputRecommendationList} sort.Sort(sortedRecs) - must.Eq(t, tc.expectedOutputList, sortedRecs.r) + assert.Equal(t, tc.expectedOutputList, sortedRecs.r, tc.name) }) } } diff --git a/command/scaling_policy_list_test.go b/command/scaling_policy_list_test.go index c5304e5cdad2..c7f6aab19d63 100644 --- a/command/scaling_policy_list_test.go +++ b/command/scaling_policy_list_test.go @@ -10,12 +10,12 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestScalingPolicyListCommand_Run(t *testing.T) { ci.Parallel(t) - + require := require.New(t) srv, client, url := testServer(t, false, nil) defer srv.Shutdown() @@ -24,9 +24,9 @@ func TestScalingPolicyListCommand_Run(t *testing.T) { // Perform an initial list, which should return zero results. code := cmd.Run([]string{"-address=" + url}) - must.Zero(t, code) + require.Equal(0, code) out := ui.OutputWriter.String() - must.StrContains(t, out, "No policies found") + require.Contains(out, "No policies found") // Generate two test jobs. jobs := []*api.Job{testJob("scaling_policy_list_1"), testJob("scaling_policy_list_2")} @@ -43,17 +43,17 @@ func TestScalingPolicyListCommand_Run(t *testing.T) { for _, job := range jobs { job.TaskGroups[0].Scaling = &scalingPolicy _, _, err := client.Jobs().Register(job, nil) - must.NoError(t, err) + require.NoError(err) } // Perform a new list which should yield results.. 
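// The sort test above switches to assert.Equal with tc.name as a trailing
// message. A short sketch, under stated assumptions (testify semantics,
// illustrative test data), of why table-driven subtests often prefer assert
// over require: assert records the failure and lets the loop continue, while
// require aborts the subtest immediately.

package example

import (
	"sort"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSortCases(t *testing.T) {
	cases := []struct {
		name string
		in   []int
		want []int
	}{
		{name: "already sorted", in: []int{1, 2, 3}, want: []int{1, 2, 3}},
		{name: "reversed", in: []int{3, 2, 1}, want: []int{1, 2, 3}},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			sort.Ints(tc.in)
			// The extra tc.name argument becomes part of the failure
			// message, mirroring the converted test above.
			assert.Equal(t, tc.want, tc.in, tc.name)
		})
	}
}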
code = cmd.Run([]string{"-address=" + url}) - must.Zero(t, code) + require.Equal(0, code) out = ui.OutputWriter.String() - must.StrContains(t, out, "ID") - must.StrContains(t, out, "Enabled") - must.StrContains(t, out, "Type") - must.StrContains(t, out, "Target") - must.StrContains(t, out, "scaling_policy_list_1") - must.StrContains(t, out, "scaling_policy_list_2") + require.Contains(out, "ID") + require.Contains(out, "Enabled") + require.Contains(out, "Type") + require.Contains(out, "Target") + require.Contains(out, "scaling_policy_list_1") + require.Contains(out, "scaling_policy_list_2") } diff --git a/command/scaling_policy_test.go b/command/scaling_policy_test.go index a804cc1758d4..a80c47bc4d08 100644 --- a/command/scaling_policy_test.go +++ b/command/scaling_policy_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/hashicorp/nomad/ci" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" ) func Test_formatScalingPolicyTarget(t *testing.T) { @@ -49,7 +49,7 @@ func Test_formatScalingPolicyTarget(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualOutput := formatScalingPolicyTarget(tc.inputMap) - must.Eq(t, tc.expectedOutput, actualOutput) + assert.Equal(t, tc.expectedOutput, actualOutput, tc.name) }) } } diff --git a/command/service_delete_test.go b/command/service_delete_test.go index 47d5d089ec73..f9ca9a3ec260 100644 --- a/command/service_delete_test.go +++ b/command/service_delete_test.go @@ -13,7 +13,8 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestServiceDeleteCommand_Run(t *testing.T) { @@ -41,7 +42,7 @@ func TestServiceDeleteCommand_Run(t *testing.T) { } return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) ui := cli.NewMockUi() @@ -54,26 +55,26 @@ func TestServiceDeleteCommand_Run(t *testing.T) { // Run the command without any arguments to ensure we are performing this // check. - must.One(t, cmd.Run([]string{"-address=" + url})) - must.StrContains(t, ui.ErrorWriter.String(), + require.Equal(t, 1, cmd.Run([]string{"-address=" + url})) + require.Contains(t, ui.ErrorWriter.String(), "This command takes two arguments: and ") ui.ErrorWriter.Reset() // Create an upsert some service registrations. serviceRegs := mock.ServiceRegistrations() - must.NoError(t, + assert.NoError(t, srv.Agent.Server().State().UpsertServiceRegistrations(structs.MsgTypeTestSetup, 10, serviceRegs)) // Detail the service within the default namespace as we need the ID. defaultNSService, _, err := client.Services().Get(serviceRegs[0].ServiceName, nil) - must.NoError(t, err) - must.Len(t, 1, defaultNSService) + require.NoError(t, err) + require.Len(t, defaultNSService, 1) // Attempt to manually delete the service registration within the default // namespace. 
code := cmd.Run([]string{"-address=" + url, "service-discovery-nomad-delete", defaultNSService[0].ID}) - must.Zero(t, code) - must.StrContains(t, ui.OutputWriter.String(), "Successfully deleted service registration") + require.Equal(t, 0, code) + require.Contains(t, ui.OutputWriter.String(), "Successfully deleted service registration") ui.OutputWriter.Reset() ui.ErrorWriter.Reset() @@ -82,15 +83,15 @@ func TestServiceDeleteCommand_Run(t *testing.T) { platformNSService, _, err := client.Services().Get(serviceRegs[1].ServiceName, &api.QueryOptions{ Namespace: serviceRegs[1].Namespace}, ) - must.NoError(t, err) - must.Len(t, 1, platformNSService) + require.NoError(t, err) + require.Len(t, platformNSService, 1) // Attempt to manually delete the service registration within the platform // namespace. code = cmd.Run([]string{"-address=" + url, "-namespace=" + platformNSService[0].Namespace, "service-discovery-nomad-delete", platformNSService[0].ID}) - must.Zero(t, code) - must.StrContains(t, ui.OutputWriter.String(), "Successfully deleted service registration") + require.Equal(t, 0, code) + require.Contains(t, ui.OutputWriter.String(), "Successfully deleted service registration") ui.OutputWriter.Reset() ui.ErrorWriter.Reset() diff --git a/command/service_info_test.go b/command/service_info_test.go index edbc2eef84d6..f0c865c99cb5 100644 --- a/command/service_info_test.go +++ b/command/service_info_test.go @@ -5,7 +5,6 @@ package command import ( "fmt" - "strings" "testing" "time" @@ -13,7 +12,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -37,7 +36,7 @@ func TestServiceInfoCommand_Run(t *testing.T) { } return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) ui := cli.NewMockUi() @@ -50,8 +49,8 @@ func TestServiceInfoCommand_Run(t *testing.T) { // Run the command without any arguments to ensure we are performing this // check. - must.One(t, cmd.Run([]string{"-address=" + url})) - must.StrContains(t, ui.ErrorWriter.String(), + require.Equal(t, 1, cmd.Run([]string{"-address=" + url})) + require.Contains(t, ui.ErrorWriter.String(), "This command takes one argument: ") ui.ErrorWriter.Reset() @@ -62,9 +61,9 @@ func TestServiceInfoCommand_Run(t *testing.T) { // Register that job. regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(t, err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(t, 0, registerCode) // Reset the output writer, otherwise we will have additional information here. ui.OutputWriter.Reset() @@ -73,8 +72,6 @@ func TestServiceInfoCommand_Run(t *testing.T) { // therefore needs this wrapper to account for eventual service // registration. One this has completed, we can perform lookups without // similar wraps. - // - // TODO(shoenig) clean this up require.Eventually(t, func() bool { defer ui.OutputWriter.Reset() @@ -86,25 +83,25 @@ func TestServiceInfoCommand_Run(t *testing.T) { // Test each header and data entry. 
s := ui.OutputWriter.String() - if !strings.Contains(s, "Job ID") { + if !assert.Contains(t, s, "Job ID") { return false } - if !strings.Contains(s, "Address") { + if !assert.Contains(t, s, "Address") { return false } - if !strings.Contains(s, "Node ID") { + if !assert.Contains(t, s, "Node ID") { return false } - if !strings.Contains(s, "Alloc ID") { + if !assert.Contains(t, s, "Alloc ID") { return false } - if !strings.Contains(s, "service-discovery-nomad-info") { + if !assert.Contains(t, s, "service-discovery-nomad-info") { return false } - if !strings.Contains(s, ":9999") { + if !assert.Contains(t, s, ":9999") { return false } - if !strings.Contains(s, "[foo,bar]") { + if !assert.Contains(t, s, "[foo,bar]") { return false } return true @@ -112,16 +109,16 @@ func TestServiceInfoCommand_Run(t *testing.T) { // Perform a verbose lookup. code := cmd.Run([]string{"-address=" + url, "-verbose", "service-discovery-nomad-info"}) - must.Zero(t, code) + require.Equal(t, 0, code) // Test KV entries. s := ui.OutputWriter.String() - must.StrContains(t, s, "Service Name = service-discovery-nomad-info") - must.StrContains(t, s, "Namespace = default") - must.StrContains(t, s, "Job ID = service-discovery-nomad-info") - must.StrContains(t, s, "Datacenter = dc1") - must.StrContains(t, s, "Address = :9999") - must.StrContains(t, s, "Tags = [foo,bar]") + require.Contains(t, s, "Service Name = service-discovery-nomad-info") + require.Contains(t, s, "Namespace = default") + require.Contains(t, s, "Job ID = service-discovery-nomad-info") + require.Contains(t, s, "Datacenter = dc1") + require.Contains(t, s, "Address = :9999") + require.Contains(t, s, "Tags = [foo,bar]") ui.OutputWriter.Reset() ui.ErrorWriter.Reset() @@ -177,7 +174,7 @@ func Test_argsWithNewPageToken(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualOutput := argsWithNewPageToken(tc.inputOsArgs, tc.inputNextToken) - must.Eq(t, tc.expectedOutput, actualOutput) + require.Equal(t, tc.expectedOutput, actualOutput) }) } } diff --git a/command/service_list_test.go b/command/service_list_test.go index 9d8292d8dd42..344fbcf62faa 100644 --- a/command/service_list_test.go +++ b/command/service_list_test.go @@ -12,7 +12,6 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -37,7 +36,7 @@ func TestServiceListCommand_Run(t *testing.T) { } return true, nil }, func(err error) { - must.NoError(t, err) + require.NoError(t, err) }) ui := cli.NewMockUi() @@ -50,8 +49,8 @@ func TestServiceListCommand_Run(t *testing.T) { // Run the command with some random arguments to ensure we are performing // this check. - must.One(t, cmd.Run([]string{"-address=" + url, "pretty-please"})) - must.StrContains(t, ui.ErrorWriter.String(), "This command takes no arguments") + require.Equal(t, 1, cmd.Run([]string{"-address=" + url, "pretty-please"})) + require.Contains(t, ui.ErrorWriter.String(), "This command takes no arguments") ui.ErrorWriter.Reset() // Create a test job with a Nomad service. @@ -61,9 +60,9 @@ func TestServiceListCommand_Run(t *testing.T) { // Register that job. 
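// The service tests above wrap their first lookup in require.Eventually
// because service registration is eventually consistent. A minimal sketch of
// that polling pattern; the goroutine stands in for Nomad's asynchronous
// registration and is not part of this patch:

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyConsistentLookup(t *testing.T) {
	var registered atomic.Bool
	go func() {
		time.Sleep(50 * time.Millisecond) // simulated registration delay
		registered.Store(true)
	}()

	// Poll every 10ms for up to 2s; the test fails only if the condition
	// never becomes true within the window.
	require.Eventually(t, func() bool {
		return registered.Load()
	}, 2*time.Second, 10*time.Millisecond)
}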
regResp, _, err := client.Jobs().Register(testJob, nil) - must.NoError(t, err) + require.NoError(t, err) registerCode := waitForSuccess(ui, client, fullId, t, regResp.EvalID) - must.Zero(t, registerCode) + require.Equal(t, 0, registerCode) // Reset the output writer, otherwise we will have additional information here. ui.OutputWriter.Reset() @@ -72,8 +71,6 @@ func TestServiceListCommand_Run(t *testing.T) { // therefore needs this wrapper to account for eventual service // registration. One this has completed, we can perform lookups without // similar wraps. - // - // TODO(shoenig) clean this up require.Eventually(t, func() bool { defer ui.OutputWriter.Reset() @@ -102,16 +99,16 @@ func TestServiceListCommand_Run(t *testing.T) { // Perform a wildcard namespace lookup. code := cmd.Run([]string{"-address=" + url, "-namespace", "*"}) - must.Zero(t, code) + require.Equal(t, 0, code) // Test each header and data entry. s := ui.OutputWriter.String() - must.StrContains(t, s, "Service Name") - must.StrContains(t, s, "Namespace") - must.StrContains(t, s, "Tags") - must.StrContains(t, s, "service-discovery-nomad-list") - must.StrContains(t, s, "default") - must.StrContains(t, s, "[bar,foo]") + require.Contains(t, s, "Service Name") + require.Contains(t, s, "Namespace") + require.Contains(t, s, "Tags") + require.Contains(t, s, "service-discovery-nomad-list") + require.Contains(t, s, "default") + require.Contains(t, s, "[bar,foo]") ui.OutputWriter.Reset() ui.ErrorWriter.Reset() diff --git a/command/setup_vault_test.go b/command/setup_vault_test.go index 9f561ee6fe68..9bce8d8b2a56 100644 --- a/command/setup_vault_test.go +++ b/command/setup_vault_test.go @@ -7,12 +7,13 @@ import ( "fmt" "testing" + "github.com/mitchellh/cli" + "github.com/shoenig/test/must" + "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent" "github.com/hashicorp/nomad/helper/pointer" - "github.com/mitchellh/cli" - "github.com/shoenig/test/must" ) func TestSetupVaultCommand_Run(t *testing.T) { diff --git a/command/status_test.go b/command/status_test.go index cce94977b4e7..33e8423e1638 100644 --- a/command/status_test.go +++ b/command/status_test.go @@ -15,11 +15,13 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStatusCommand_Run_JobStatus(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -30,20 +32,22 @@ func TestStatusCommand_Run_JobStatus(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() j := mock.Job() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) // Query to check the job status - code := cmd.Run([]string{"-address=" + url, j.ID}) - must.Zero(t, code) + if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } out := ui.OutputWriter.String() - must.StrContains(t, out, j.ID) + assert.Contains(out, j.ID) ui.OutputWriter.Reset() } func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { ci.Parallel(t) + assert := assert.New(t) srv, _, url := testServer(t, true, nil) defer srv.Shutdown() @@ -56,20 +60,22 @@ func TestStatusCommand_Run_JobStatus_MultiMatch(t *testing.T) { j := mock.Job() j2 := mock.Job() j2.ID = fmt.Sprintf("%s-more", j.ID) - 
must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, j2)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, j)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1001, nil, j2)) // Query to check the job status - code := cmd.Run([]string{"-address=" + url, j.ID}) - must.Zero(t, code) + if code := cmd.Run([]string{"-address=" + url, j.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } out := ui.OutputWriter.String() - must.StrContains(t, out, j.ID) + assert.Contains(out, j.ID) ui.OutputWriter.Reset() } func TestStatusCommand_Run_EvalStatus(t *testing.T) { + assert := assert.New(t) ci.Parallel(t) srv, _, url := testServer(t, true, nil) @@ -81,7 +87,7 @@ func TestStatusCommand_Run_EvalStatus(t *testing.T) { // Create a fake eval state := srv.Agent.Server().State() eval := mock.Eval() - must.NoError(t, state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) + assert.Nil(state.UpsertEvals(structs.MsgTypeTestSetup, 1000, []*structs.Evaluation{eval})) // Query to check the eval status if code := cmd.Run([]string{"-address=" + url, eval.ID}); code != 0 { @@ -89,12 +95,13 @@ func TestStatusCommand_Run_EvalStatus(t *testing.T) { } out := ui.OutputWriter.String() - must.StrContains(t, out, eval.ID[:shortId]) + assert.Contains(out, eval.ID[:shortId]) ui.OutputWriter.Reset() } func TestStatusCommand_Run_NodeStatus(t *testing.T) { + assert := assert.New(t) ci.Parallel(t) // Start in dev mode so we get a node registration @@ -128,12 +135,13 @@ func TestStatusCommand_Run_NodeStatus(t *testing.T) { } out := ui.OutputWriter.String() - must.StrContains(t, out, "mynode") + assert.Contains(out, "mynode") ui.OutputWriter.Reset() } func TestStatusCommand_Run_AllocStatus(t *testing.T) { + assert := assert.New(t) ci.Parallel(t) srv, _, url := testServer(t, true, nil) @@ -145,18 +153,20 @@ func TestStatusCommand_Run_AllocStatus(t *testing.T) { // Create a fake alloc state := srv.Agent.Server().State() alloc := mock.Alloc() - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) + assert.Nil(state.UpsertAllocs(structs.MsgTypeTestSetup, 1000, []*structs.Allocation{alloc})) - code := cmd.Run([]string{"-address=" + url, alloc.ID}) - must.Zero(t, code) + if code := cmd.Run([]string{"-address=" + url, alloc.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } out := ui.OutputWriter.String() - must.StrContains(t, out, alloc.ID[:shortId]) + assert.Contains(out, alloc.ID[:shortId]) ui.OutputWriter.Reset() } func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { + assert := assert.New(t) ci.Parallel(t) srv, _, url := testServer(t, true, nil) @@ -168,19 +178,21 @@ func TestStatusCommand_Run_DeploymentStatus(t *testing.T) { // Create a fake deployment state := srv.Agent.Server().State() deployment := mock.Deployment() - must.NoError(t, state.UpsertDeployment(1000, deployment)) + assert.Nil(state.UpsertDeployment(1000, deployment)) // Query to check the deployment status - code := cmd.Run([]string{"-address=" + url, deployment.ID}) - must.Zero(t, code) + if code := cmd.Run([]string{"-address=" + url, deployment.ID}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } out := ui.OutputWriter.String() - must.StrContains(t, out, deployment.ID[:shortId]) + assert.Contains(out, deployment.ID[:shortId]) ui.OutputWriter.Reset() } func TestStatusCommand_Run_NoPrefix(t *testing.T) { + assert := assert.New(t) 
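// The status tests above all follow the same shape: run the command against
// a mock UI, check the exit code, then inspect what was written. A sketch of
// that pattern with a hypothetical command type (echoCommand is a stand-in;
// real Nomad commands embed Meta and implement Run([]string) int the same way):

package example

import (
	"strings"
	"testing"

	"github.com/mitchellh/cli"
)

// echoCommand is illustrative only, not a command from this patch.
type echoCommand struct{ ui cli.Ui }

func (c *echoCommand) Run(args []string) int {
	if len(args) != 1 {
		c.ui.Error("This command takes one argument")
		return 1
	}
	c.ui.Output("status of " + args[0])
	return 0
}

func TestExitCodeAndWriters(t *testing.T) {
	ui := cli.NewMockUi()
	cmd := &echoCommand{ui: ui}

	// Usage errors exit 1 and land on the error writer.
	if code := cmd.Run(nil); code != 1 {
		t.Fatalf("expected exit 1, got: %d", code)
	}
	if !strings.Contains(ui.ErrorWriter.String(), "takes one argument") {
		t.Fatalf("unexpected stderr: %q", ui.ErrorWriter.String())
	}
	ui.ErrorWriter.Reset()

	// Successful runs exit 0 and write to the output writer.
	if code := cmd.Run([]string{"example"}); code != 0 {
		t.Fatalf("expected exit 0, got: %d", code)
	}
	if !strings.Contains(ui.OutputWriter.String(), "status of example") {
		t.Fatalf("unexpected stdout: %q", ui.OutputWriter.String())
	}
}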
ci.Parallel(t) srv, _, url := testServer(t, true, nil) @@ -192,19 +204,21 @@ func TestStatusCommand_Run_NoPrefix(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() job := mock.Job() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) // Query to check status - code := cmd.Run([]string{"-address=" + url}) - must.Zero(t, code) + if code := cmd.Run([]string{"-address=" + url}); code != 0 { + t.Fatalf("expected exit 0, got: %d", code) + } out := ui.OutputWriter.String() - must.StrContains(t, out, job.ID) + assert.Contains(out, job.ID) ui.OutputWriter.Reset() } func TestStatusCommand_AutocompleteArgs(t *testing.T) { + assert := assert.New(t) ci.Parallel(t) srv, _, url := testServer(t, true, nil) @@ -216,14 +230,14 @@ func TestStatusCommand_AutocompleteArgs(t *testing.T) { // Create a fake job state := srv.Agent.Server().State() job := mock.Job() - must.NoError(t, state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) + assert.Nil(state.UpsertJob(structs.MsgTypeTestSetup, 1000, nil, job)) prefix := job.ID[:len(job.ID)-5] args := complete.Args{Last: prefix} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.SliceContains(t, res, job.ID) + assert.Contains(res, job.ID) } func TestStatusCommand_Run_HostNetwork(t *testing.T) { @@ -247,7 +261,7 @@ func TestStatusCommand_Run_HostNetwork(t *testing.T) { verbose: false, assertions: func(out string) { hostNetworksRegexpStr := `Host Networks\s+=\s+internal\n` - must.RegexMatch(t, regexp.MustCompile(hostNetworksRegexpStr), out) + require.Regexp(t, regexp.MustCompile(hostNetworksRegexpStr), out) }, }, { @@ -260,10 +274,10 @@ func TestStatusCommand_Run_HostNetwork(t *testing.T) { verbose: true, assertions: func(out string) { verboseHostNetworksHeadRegexpStr := `Name\s+CIDR\s+Interface\s+ReservedPorts\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) verboseHostNetworksBodyRegexpStr := `internal\s+127\.0\.0\.1/8\s+lo\s+\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) }, }, { @@ -275,10 +289,10 @@ func TestStatusCommand_Run_HostNetwork(t *testing.T) { verbose: true, assertions: func(out string) { verboseHostNetworksHeadRegexpStr := `Name\s+CIDR\s+Interface\s+ReservedPorts\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) verboseHostNetworksBodyRegexpStr := `public\s+10\.199\.0\.200/24\s+\s+\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) }, }, { @@ -291,10 +305,10 @@ func TestStatusCommand_Run_HostNetwork(t *testing.T) { verbose: true, assertions: func(out string) { verboseHostNetworksHeadRegexpStr := `Name\s+CIDR\s+Interface\s+ReservedPorts\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksHeadRegexpStr), out) verboseHostNetworksBodyRegexpStr := `public\s+10\.199\.0\.200/24\s+\s+8080,8081\n` - must.RegexMatch(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) + require.Regexp(t, regexp.MustCompile(verboseHostNetworksBodyRegexpStr), out) }, }, } diff --git 
a/command/testdata/example-short-bad.json b/command/testdata/example-short-bad.json index d3866e2534d6..8f6fcdb6d33e 100644 --- a/command/testdata/example-short-bad.json +++ b/command/testdata/example-short-bad.json @@ -90,10 +90,6 @@ "ShutdownDelay": null, "StopAfterClientDisconnect": null, "MaxClientDisconnect": null, - "Disconnect":{ - "StopAfterClient": null, - "LostAfter": null - }, "Scaling": null, "Consul": null } diff --git a/command/testdata/example-short.json b/command/testdata/example-short.json index 44c4a000c00d..a62b33b6405a 100644 --- a/command/testdata/example-short.json +++ b/command/testdata/example-short.json @@ -93,7 +93,6 @@ "ShutdownDelay": null, "StopAfterClientDisconnect": null, "MaxClientDisconnect": null, - "Disconnect": null, "Scaling": null, "Consul": null } diff --git a/command/tls_ca_create_test.go b/command/tls_ca_create_test.go index 30f70c0a9ef0..90f11037c7df 100644 --- a/command/tls_ca_create_test.go +++ b/command/tls_ca_create_test.go @@ -11,14 +11,14 @@ import ( "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestCACreateCommand(t *testing.T) { testDir := t.TempDir() previousDirectory, err := os.Getwd() - must.NoError(t, err) - must.NoError(t, os.Chdir(testDir)) + require.NoError(t, err) + require.NoError(t, os.Chdir(testDir)) defer os.Chdir(previousDirectory) type testcase struct { @@ -35,9 +35,9 @@ func TestCACreateCommand(t *testing.T) { "nomad-agent-ca.pem", "nomad-agent-ca-key.pem", func(t *testing.T, cert *x509.Certificate) { - must.Eq(t, 1825*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) - must.False(t, cert.PermittedDNSDomainsCritical) - must.SliceEmpty(t, cert.PermittedDNSDomains) + require.Equal(t, 1825*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) + require.False(t, cert.PermittedDNSDomainsCritical) + require.Len(t, cert.PermittedDNSDomains, 0) }, }, {"ca custom domain", @@ -48,7 +48,7 @@ func TestCACreateCommand(t *testing.T) { "foo.com-agent-ca.pem", "foo.com-agent-ca-key.pem", func(t *testing.T, cert *x509.Certificate) { - must.SliceContainsAll(t, cert.PermittedDNSDomains, []string{"nomad", "foo.com", "localhost"}) + require.ElementsMatch(t, cert.PermittedDNSDomains, []string{"nomad", "foo.com", "localhost"}) }, }, {"ca options", @@ -65,14 +65,14 @@ func TestCACreateCommand(t *testing.T) { "foo-agent-ca.pem", "foo-agent-ca-key.pem", func(t *testing.T, cert *x509.Certificate) { - must.Eq(t, 365*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) - must.True(t, cert.PermittedDNSDomainsCritical) - must.Len(t, 4, cert.PermittedDNSDomains) - must.SliceContainsAll(t, cert.PermittedDNSDomains, []string{"nomad", "foo", "localhost", "bar"}) - must.Eq(t, cert.Issuer.Organization, []string{"CustOrg"}) - must.Eq(t, cert.Issuer.OrganizationalUnit, []string{"CustOrgUnit"}) - must.Eq(t, cert.Issuer.Country, []string{"ZZ"}) - must.StrHasPrefix(t, "CustomCA", cert.Issuer.CommonName) + require.Equal(t, 365*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) + require.True(t, cert.PermittedDNSDomainsCritical) + require.Len(t, cert.PermittedDNSDomains, 4) + require.ElementsMatch(t, cert.PermittedDNSDomains, []string{"nomad", "foo", "localhost", "bar"}) + require.Equal(t, cert.Issuer.Organization, []string{"CustOrg"}) + require.Equal(t, cert.Issuer.OrganizationalUnit, []string{"CustOrgUnit"}) + require.Equal(t, cert.Issuer.Country, []string{"ZZ"}) + require.Contains(t, cert.Issuer.CommonName, "CustomCA") }, }, {"ca 
custom date", @@ -82,7 +82,7 @@ func TestCACreateCommand(t *testing.T) { "nomad-agent-ca.pem", "nomad-agent-ca-key.pem", func(t *testing.T, cert *x509.Certificate) { - must.Eq(t, 365*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) + require.Equal(t, 365*24*time.Hour, time.Until(cert.NotAfter).Round(24*time.Hour)) }, }, } @@ -91,20 +91,20 @@ func TestCACreateCommand(t *testing.T) { t.Run(tc.name, func(t *testing.T) { ui := cli.NewMockUi() cmd := &TLSCACreateCommand{Meta: Meta{Ui: ui}} - must.Zero(t, cmd.Run(tc.args)) - must.Eq(t, "", ui.ErrorWriter.String()) + require.Equal(t, 0, cmd.Run(tc.args), ui.ErrorWriter.String()) + require.Equal(t, "", ui.ErrorWriter.String()) // is a valid key key := testutil.IsValidSigner(t, tc.keyPath) - must.True(t, key) + require.True(t, key) // is a valid ca expects the ca ca := testutil.IsValidCertificate(t, tc.caPath) - must.True(t, ca.BasicConstraintsValid) - must.Eq(t, x509.KeyUsageCertSign|x509.KeyUsageCRLSign|x509.KeyUsageDigitalSignature, ca.KeyUsage) - must.True(t, ca.IsCA) - must.Eq(t, ca.AuthorityKeyId, ca.SubjectKeyId) + require.True(t, ca.BasicConstraintsValid) + require.Equal(t, x509.KeyUsageCertSign|x509.KeyUsageCRLSign|x509.KeyUsageDigitalSignature, ca.KeyUsage) + require.True(t, ca.IsCA) + require.Equal(t, ca.AuthorityKeyId, ca.SubjectKeyId) tc.extraCheck(t, ca) - must.NoError(t, os.Remove(tc.caPath)) - must.NoError(t, os.Remove(tc.keyPath)) + require.NoError(t, os.Remove(tc.caPath)) + require.NoError(t, os.Remove(tc.keyPath)) }) } } diff --git a/command/tls_cert_create_test.go b/command/tls_cert_create_test.go index b48c1fc00411..68f92c4d1d4e 100644 --- a/command/tls_cert_create_test.go +++ b/command/tls_cert_create_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/testutil" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestTlsCertCreateCommand_InvalidArgs(t *testing.T) { @@ -48,12 +48,12 @@ func TestTlsCertCreateCommand_InvalidArgs(t *testing.T) { ci.Parallel(t) ui := cli.NewMockUi() cmd := &TLSCertCreateCommand{Meta: Meta{Ui: ui}} - must.Positive(t, cmd.Run(tc.args)) + require.NotEqual(t, 0, cmd.Run(tc.args)) got := ui.ErrorWriter.String() if tc.expectErr == "" { - must.NotNil(t, got) + require.NotEmpty(t, got) // don't care } else { - must.StrContains(t, got, tc.expectErr) + require.Contains(t, got, tc.expectErr) } }) } @@ -62,8 +62,8 @@ func TestTlsCertCreateCommand_InvalidArgs(t *testing.T) { func TestTlsCertCreateCommandDefaults_fileCreate(t *testing.T) { testDir := t.TempDir() previousDirectory, err := os.Getwd() - must.NoError(t, err) - must.NoError(t, os.Chdir(testDir)) + require.NoError(t, err) + require.NoError(t, os.Chdir(testDir)) defer os.Chdir(previousDirectory) ui := cli.NewMockUi() @@ -143,34 +143,34 @@ func TestTlsCertCreateCommandDefaults_fileCreate(t *testing.T) { for _, tc := range cases { tc := tc - must.True(t, t.Run(tc.name, func(t *testing.T) { + require.True(t, t.Run(tc.name, func(t *testing.T) { ui := cli.NewMockUi() cmd := &TLSCertCreateCommand{Meta: Meta{Ui: ui}} - must.Zero(t, cmd.Run(tc.args)) - must.Eq(t, tc.errOut, ui.ErrorWriter.String()) + require.Equal(t, 0, cmd.Run(tc.args)) + require.Equal(t, tc.errOut, ui.ErrorWriter.String()) // is a valid cert expects the cert cert := testutil.IsValidCertificate(t, tc.certPath) - must.Eq(t, tc.expectCN, cert.Subject.CommonName) - must.True(t, cert.BasicConstraintsValid) - must.Eq(t, x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment, 
cert.KeyUsage) + require.Equal(t, tc.expectCN, cert.Subject.CommonName) + require.True(t, cert.BasicConstraintsValid) + require.Equal(t, x509.KeyUsageDigitalSignature|x509.KeyUsageKeyEncipherment, cert.KeyUsage) switch tc.typ { case "server": - must.Eq(t, + require.Equal(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, cert.ExtKeyUsage) case "client": - must.Eq(t, + require.Equal(t, []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth, x509.ExtKeyUsageClientAuth}, cert.ExtKeyUsage) case "cli": - must.Eq(t, + require.Equal(t, []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth}, cert.ExtKeyUsage) } - must.False(t, cert.IsCA) - must.Eq(t, tc.expectDNS, cert.DNSNames) - must.Eq(t, tc.expectIP, cert.IPAddresses) + require.False(t, cert.IsCA) + require.Equal(t, tc.expectDNS, cert.DNSNames) + require.Equal(t, tc.expectIP, cert.IPAddresses) })) } } @@ -310,7 +310,7 @@ func TestTlsRecordPreparation(t *testing.T) { for _, tc := range cases { tc := tc - must.True(t, t.Run(tc.name, func(t *testing.T) { + require.True(t, t.Run(tc.name, func(t *testing.T) { var ipAddresses []net.IP for _, i := range tc.ipAddresses { if len(i) > 0 { @@ -319,11 +319,11 @@ func TestTlsRecordPreparation(t *testing.T) { } ipAddresses, dnsNames, name, extKeyUsage, prefix := recordPreparation(tc.certType, tc.regionName, tc.domain, tc.dnsNames, ipAddresses) - must.Eq(t, tc.expectedipAddresses, ipAddresses) - must.Eq(t, tc.expectedDNSNames, dnsNames) - must.Eq(t, tc.expectedName, name) - must.Eq(t, tc.expectedextKeyUsage, extKeyUsage) - must.Eq(t, tc.expectedPrefix, prefix) + require.Equal(t, tc.expectedipAddresses, ipAddresses) + require.Equal(t, tc.expectedDNSNames, dnsNames) + require.Equal(t, tc.expectedName, name) + require.Equal(t, tc.expectedextKeyUsage, extKeyUsage) + require.Equal(t, tc.expectedPrefix, prefix) })) } } diff --git a/command/ui_test.go b/command/ui_test.go index 144342fcc297..7cfcf10a58d9 100644 --- a/command/ui_test.go +++ b/command/ui_test.go @@ -9,7 +9,7 @@ import ( "testing" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestCommand_Ui(t *testing.T) { @@ -102,12 +102,13 @@ func TestCommand_Ui(t *testing.T) { // Don't try to open a browser. 
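// The TLS test hunks above assert on parsed x509 fields (IsCA, KeyUsage,
// PermittedDNSDomains, NotAfter). A minimal sketch of the load-and-inspect
// half of that flow; the file name and the t.Skip are placeholders, since
// this sketch does not generate a CA the way the command under test does:

package example

import (
	"crypto/x509"
	"encoding/pem"
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// loadCertificate reads the first PEM block from path and parses it.
func loadCertificate(t *testing.T, path string) *x509.Certificate {
	t.Helper()
	raw, err := os.ReadFile(path)
	require.NoError(t, err)
	block, _ := pem.Decode(raw)
	require.NotNil(t, block, "no PEM data in %s", path)
	cert, err := x509.ParseCertificate(block.Bytes)
	require.NoError(t, err)
	return cert
}

func TestInspectCA(t *testing.T) {
	t.Skip("sketch only: assumes nomad-agent-ca.pem was created beforehand")
	ca := loadCertificate(t, "nomad-agent-ca.pem")
	require.True(t, ca.IsCA)
	require.True(t, ca.BasicConstraintsValid)
	require.Equal(t,
		x509.KeyUsageCertSign|x509.KeyUsageCRLSign|x509.KeyUsageDigitalSignature,
		ca.KeyUsage)
}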
args := append(tc.Args, "-show-url") - code := cmd.Run(args) - must.Zero(t, code) + if code := cmd.Run(args); code != 0 { + require.Equal(t, 0, code, "expected exit code 0, got %d", code) + } got := ui.OutputWriter.String() expected := fmt.Sprintf("URL for web UI: %s", tc.ExpectedURL) - must.Eq(t, expected, strings.TrimSpace(got)) + require.Equal(t, expected, strings.TrimSpace(got)) }) } } diff --git a/command/var_get_test.go b/command/var_get_test.go index 16d33de02c59..c0aaab4c0df7 100644 --- a/command/var_get_test.go +++ b/command/var_get_test.go @@ -12,7 +12,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVarGetCommand_Implements(t *testing.T) { @@ -28,17 +28,18 @@ func TestVarGetCommand_Fails(t *testing.T) { cmd := &VarGetCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"some", "bad", "args"}) out := ui.ErrorWriter.String() - must.One(t, code) - must.StrContains(t, out, commandErrorText(cmd)) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Contains(t, out, commandErrorText(cmd), "expected help output, got: %s", out) }) t.Run("bad_address", func(t *testing.T) { ci.Parallel(t) ui := cli.NewMockUi() cmd := &VarGetCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"-address=nope", "foo"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "retrieving variable") - must.Eq(t, "", ui.OutputWriter.String()) + out := ui.ErrorWriter.String() + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Contains(t, ui.ErrorWriter.String(), "retrieving variable", "connection error, got: %s", out) + require.Zero(t, ui.OutputWriter.String()) }) t.Run("missing_template", func(t *testing.T) { ci.Parallel(t) @@ -46,9 +47,9 @@ func TestVarGetCommand_Fails(t *testing.T) { cmd := &VarGetCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-out=go-template`, "foo"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errMissingTemplate+"\n"+commandErrorText(cmd), out) - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errMissingTemplate+"\n"+commandErrorText(cmd), out) + require.Zero(t, ui.OutputWriter.String()) }) t.Run("unexpected_template", func(t *testing.T) { ci.Parallel(t) @@ -56,9 +57,9 @@ func TestVarGetCommand_Fails(t *testing.T) { cmd := &VarGetCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-out=json`, `-template="bad"`, "foo"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errUnexpectedTemplate+"\n"+commandErrorText(cmd), out) - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errUnexpectedTemplate+"\n"+commandErrorText(cmd), out) + require.Zero(t, ui.OutputWriter.String()) }) } @@ -109,7 +110,7 @@ func TestVarGetCommand(t *testing.T) { // Create a namespace for the test case testNS := strings.Map(validNS, t.Name()) _, err = client.Namespaces().Register(&api.Namespace{Name: testNS}, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { _, _ = client.Namespaces().Delete(testNS, nil) }) @@ -118,7 +119,7 @@ func TestVarGetCommand(t *testing.T) { sv := testVariable() sv.Namespace = testNS sv, _, err = client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { _, _ = client.Variables().Delete(sv.Path, nil) }) @@ -142,22
+143,22 @@ func TestVarGetCommand(t *testing.T) { code := cmd.Run(args) // Check the output - must.Eq(t, tc.exitCode, code) + require.Equal(t, tc.exitCode, code, "expected exit %v, got: %d; %v", tc.exitCode, code, ui.ErrorWriter.String()) if tc.isError { - must.Eq(t, tc.expected, strings.TrimSpace(ui.ErrorWriter.String())) + require.Equal(t, tc.expected, strings.TrimSpace(ui.ErrorWriter.String())) return } switch tc.format { case "json": - must.Eq(t, sv.AsPrettyJSON(), strings.TrimSpace(ui.OutputWriter.String())) + require.Equal(t, sv.AsPrettyJSON(), strings.TrimSpace(ui.OutputWriter.String())) case "table": out := ui.OutputWriter.String() outs := strings.Split(out, "\n") - must.Len(t, 9, outs) - must.Eq(t, "Namespace = "+testNS, outs[0]) - must.Eq(t, "Path = test/var", outs[1]) + require.Len(t, outs, 9) + require.Equal(t, "Namespace = "+testNS, outs[0]) + require.Equal(t, "Path = test/var", outs[1]) case "go-template": - must.Eq(t, tc.expected, strings.TrimSpace(ui.OutputWriter.String())) + require.Equal(t, tc.expected, strings.TrimSpace(ui.OutputWriter.String())) default: t.Fatalf("invalid format: %q", tc.format) } @@ -172,7 +173,7 @@ func TestVarGetCommand(t *testing.T) { // Create a var testNS := strings.Map(validNS, t.Name()) _, err := client.Namespaces().Register(&api.Namespace{Name: testNS}, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { _, _ = client.Namespaces().Delete(testNS, nil) }) @@ -181,7 +182,7 @@ func TestVarGetCommand(t *testing.T) { sv.Path = "special/variable" sv.Namespace = testNS sv, _, err = client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { _, _ = client.Variables().Delete(sv.Path, nil) }) @@ -190,8 +191,8 @@ func TestVarGetCommand(t *testing.T) { predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, sv.Path, res[0]) + require.Equal(t, 1, len(res)) + require.Equal(t, sv.Path, res[0]) }) } diff --git a/command/var_init_test.go b/command/var_init_test.go index 7d835ab62071..86523c356cb2 100644 --- a/command/var_init_test.go +++ b/command/var_init_test.go @@ -10,7 +10,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVarInitCommand_Implements(t *testing.T) { @@ -22,9 +22,9 @@ func TestVarInitCommand_Run(t *testing.T) { ci.Parallel(t) dir := t.TempDir() origDir, err := os.Getwd() - must.NoError(t, err) + require.NoError(t, err) err = os.Chdir(dir) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { os.Chdir(origDir) }) t.Run("hcl", func(t *testing.T) { @@ -35,41 +35,41 @@ func TestVarInitCommand_Run(t *testing.T) { // Fails on misuse ec := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, ec) - must.StrContains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, ec) + require.Contains(t, ui.ErrorWriter.String(), commandErrorText(cmd)) + require.Empty(t, ui.OutputWriter.String()) reset(ui) // Works if the file doesn't exist ec = cmd.Run([]string{"-out", "hcl"}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Eq(t, "Example variable specification written to spec.nv.hcl\n", ui.OutputWriter.String()) - must.Zero(t, ec) + require.Empty(t, ui.ErrorWriter.String()) + require.Equal(t, "Example variable specification written to spec.nv.hcl\n", ui.OutputWriter.String()) + require.Zero(t, ec) reset(ui) t.Cleanup(func() { os.Remove(path.Join(dir, "spec.nv.hcl")) }) 
content, err := os.ReadFile(DefaultHclVarInitName) - must.NoError(t, err) - must.Eq(t, defaultHclVarSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultHclVarSpec, string(content)) // Fails if the file exists ec = cmd.Run([]string{"-out", "hcl"}) - must.StrContains(t, ui.ErrorWriter.String(), "exists") - must.Eq(t, "", ui.OutputWriter.String()) - must.One(t, ec) + require.Contains(t, ui.ErrorWriter.String(), "exists") + require.Empty(t, ui.OutputWriter.String()) + require.Equal(t, 1, ec) reset(ui) // Works if file is passed ec = cmd.Run([]string{"-out", "hcl", "myTest.hcl"}) - must.Eq(t, "", ui.ErrorWriter.String()) - must.Eq(t, "Example variable specification written to myTest.hcl\n", ui.OutputWriter.String()) - must.Zero(t, ec) + require.Empty(t, ui.ErrorWriter.String()) + require.Equal(t, "Example variable specification written to myTest.hcl\n", ui.OutputWriter.String()) + require.Zero(t, ec) reset(ui) t.Cleanup(func() { os.Remove(path.Join(dir, "myTest.hcl")) }) content, err = os.ReadFile("myTest.hcl") - must.NoError(t, err) - must.Eq(t, defaultHclVarSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultHclVarSpec, string(content)) }) t.Run("json", func(t *testing.T) { ci.Parallel(t) @@ -79,41 +79,41 @@ func TestVarInitCommand_Run(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "This command takes no arguments or one") - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, code) + require.Contains(t, ui.ErrorWriter.String(), "This command takes no arguments or one") + require.Empty(t, ui.OutputWriter.String()) reset(ui) // Works if the file doesn't exist code = cmd.Run([]string{"-out", "json"}) - must.StrContains(t, ui.ErrorWriter.String(), "REMINDER: While keys") - must.StrContains(t, ui.OutputWriter.String(), "Example variable specification written to spec.nv.json\n") - must.Zero(t, code) + require.Contains(t, ui.ErrorWriter.String(), "REMINDER: While keys") + require.Contains(t, ui.OutputWriter.String(), "Example variable specification written to spec.nv.json\n") + require.Zero(t, code) reset(ui) t.Cleanup(func() { os.Remove(path.Join(dir, "spec.nv.json")) }) content, err := os.ReadFile(DefaultJsonVarInitName) - must.NoError(t, err) - must.Eq(t, defaultJsonVarSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultJsonVarSpec, string(content)) // Fails if the file exists code = cmd.Run([]string{"-out", "json"}) - must.StrContains(t, ui.ErrorWriter.String(), "exists") - must.Eq(t, "", ui.OutputWriter.String()) - must.One(t, code) + require.Contains(t, ui.ErrorWriter.String(), "exists") + require.Empty(t, ui.OutputWriter.String()) + require.Equal(t, 1, code) reset(ui) // Works if file is passed code = cmd.Run([]string{"-out", "json", "myTest.json"}) - must.StrContains(t, ui.ErrorWriter.String(), "REMINDER: While keys") - must.StrContains(t, ui.OutputWriter.String(), "Example variable specification written to myTest.json\n") - must.Zero(t, code) + require.Contains(t, ui.ErrorWriter.String(), "REMINDER: While keys") + require.Contains(t, ui.OutputWriter.String(), "Example variable specification written to myTest.json\n") + require.Zero(t, code) reset(ui) t.Cleanup(func() { os.Remove(path.Join(dir, "myTest.json")) }) content, err = os.ReadFile("myTest.json") - must.NoError(t, err) - must.Eq(t, defaultJsonVarSpec, string(content)) + require.NoError(t, err) + require.Equal(t, defaultJsonVarSpec, string(content)) }) } 
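// The var-init tests above run inside t.TempDir and restore the working
// directory afterwards so file-creating commands cannot collide across
// tests. A compact sketch of that setup (the file name and contents are
// illustrative):

package example

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestFileCreationInTempDir(t *testing.T) {
	dir := t.TempDir() // deleted automatically when the test finishes
	orig, err := os.Getwd()
	require.NoError(t, err)
	require.NoError(t, os.Chdir(dir))
	t.Cleanup(func() { _ = os.Chdir(orig) }) // restore for later tests

	require.NoError(t, os.WriteFile("spec.nv.hcl", []byte("# example\n"), 0o644))

	content, err := os.ReadFile("spec.nv.hcl")
	require.NoError(t, err)
	require.Equal(t, "# example\n", string(content))
}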
diff --git a/command/var_list_test.go b/command/var_list_test.go index 3194cea46cea..ce18d99e256f 100644 --- a/command/var_list_test.go +++ b/command/var_list_test.go @@ -13,6 +13,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVarListCommand_Implements(t *testing.T) { @@ -80,29 +81,36 @@ func TestVarListCommand_Offline(t *testing.T) { errOut := ui.ErrorWriter.String() defer resetUiWriters(ui) - must.Eq(t, tC.exitCode, ec) + require.Equal(t, tC.exitCode, ec, + "Expected exit code %v; got: %v\nstdout: %s\nstderr: %s", + tC.exitCode, ec, stdOut, errOut, + ) if tC.expectUsage { help := cmd.Help() - must.Eq(t, help, strings.TrimSpace(stdOut)) + require.Equal(t, help, strings.TrimSpace(stdOut)) // Test that stdout ends with a linefeed since we trim them for // convenience in the equality tests. - must.True(t, strings.HasSuffix(stdOut, "\n")) + require.True(t, strings.HasSuffix(stdOut, "\n"), + "stdout does not end with a linefeed") } if tC.expectUsageError { - must.StrContains(t, errOut, commandErrorText(cmd)) + require.Contains(t, errOut, commandErrorText(cmd)) } if tC.expectStdOut != "" { - must.Eq(t, tC.expectStdOut, strings.TrimSpace(stdOut)) + require.Equal(t, tC.expectStdOut, strings.TrimSpace(stdOut)) // Test that stdout ends with a linefeed since we trim them for // convenience in the equality tests. - must.True(t, strings.HasSuffix(stdOut, "\n")) + require.True(t, strings.HasSuffix(stdOut, "\n"), + "stdout does not end with a linefeed") } if tC.expectStdErrPrefix != "" { - must.True(t, strings.HasPrefix(errOut, tC.expectStdErrPrefix)) - + require.True(t, strings.HasPrefix(errOut, tC.expectStdErrPrefix), + "Expected stderr to start with %q; got %s", + tC.expectStdErrPrefix, errOut) // Test that stderr ends with a linefeed since we trim them for // convenience in the equality tests. - must.True(t, strings.HasSuffix(errOut, "\n")) + require.True(t, strings.HasSuffix(errOut, "\n"), + "stderr does not end with a linefeed") } }) } @@ -132,10 +140,10 @@ func TestVarListCommand_Online(t *testing.T) { expect := expect exp, ok := expect.(NSPather) - must.True(t, ok) + require.True(t, ok, "expect is not an NSPather, got %T", expect) in, ok := check.(NSPather) - must.True(t, ok) - must.Eq(t, exp.NSPaths(), in.NSPaths()) + require.True(t, ok, "check is not an NSPather, got %T", check) + require.ElementsMatch(t, exp.NSPaths(), in.NSPaths()) } return out } @@ -145,9 +153,11 @@ func TestVarListCommand_Online(t *testing.T) { length := length in, ok := check.(NSPather) - must.True(t, ok) + require.True(t, ok, "check is not an NSPather, got %T", check) inLen := in.NSPaths().Len() - must.Eq(t, length, inLen) + require.Equal(t, length, inLen, + "expected length of %v, got %v. \nvalues: %v", + length, inLen, in.NSPaths()) } return out } @@ -275,28 +285,34 @@ func TestVarListCommand_Online(t *testing.T) { errOut := ui.ErrorWriter.String() defer resetUiWriters(ui) - must.Eq(t, tC.exitCode, code) + require.Equal(t, tC.exitCode, code, + "Expected exit code %v; got: %v\nstdout: %s\nstderr: %s", + tC.exitCode, code, stdOut, errOut) if tC.expectStdOut != "" { - must.Eq(t, tC.expectStdOut, strings.TrimSpace(stdOut)) + require.Equal(t, tC.expectStdOut, strings.TrimSpace(stdOut)) // Test that stdout ends with a linefeed since we trim them for // convenience in the equality tests. 
- must.True(t, strings.HasSuffix(stdOut, "\n")) + require.True(t, strings.HasSuffix(stdOut, "\n"), + "stdout does not end with a linefeed") } if tC.expectStdErrPrefix != "" { - must.True(t, strings.HasPrefix(errOut, tC.expectStdErrPrefix)) + require.True(t, strings.HasPrefix(errOut, tC.expectStdErrPrefix), + "Expected stderr to start with %q; got %s", + tC.expectStdErrPrefix, errOut) // Test that stderr ends with a linefeed since this test only // considers prefixes. - must.True(t, strings.HasSuffix(stdOut, "\n")) + require.True(t, strings.HasSuffix(stdOut, "\n"), + "stderr does not end with a linefeed") } if tC.jsonTest != nil { jtC := tC.jsonTest err := json.Unmarshal([]byte(stdOut), &jtC.jsonDest) - must.NoError(t, err) + require.NoError(t, err, "stdout: %s", stdOut) for _, fn := range jtC.expectFns { fn(t, jtC.jsonDest) diff --git a/command/var_lock_test.go b/command/var_lock_test.go index 87e1ea06069a..4df597a8cde8 100644 --- a/command/var_lock_test.go +++ b/command/var_lock_test.go @@ -12,6 +12,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVarLockCommand_Implements(t *testing.T) { @@ -98,7 +99,7 @@ func TestVarLockCommand_Good(t *testing.T) { // Get the variable code := cmd.Run([]string{"-address=" + url, "test/var/shell", "touch ", filePath}) - must.Zero(t, code) + require.Equal(t, 0, code, "expected exit 0, got: %d; %v", code, ui.ErrorWriter.String()) sv, _, err := srv.APIClient().Variables().Peek("test/var/shell", nil) must.NoError(t, err) @@ -132,7 +133,7 @@ func TestVarLockCommand_Good_NoShell(t *testing.T) { // Get the variable code := cmd.Run([]string{"-address=" + url, "-shell=false", "test/var/noShell", "touch", filePath}) - must.Zero(t, code) + require.Zero(t, code) sv, _, err := srv.APIClient().Variables().Peek("test/var/noShell", nil) must.NoError(t, err) diff --git a/command/var_purge_test.go b/command/var_purge_test.go index 94727c51da07..a2214acded7a 100644 --- a/command/var_purge_test.go +++ b/command/var_purge_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVarPurgeCommand_Implements(t *testing.T) { @@ -27,17 +27,18 @@ func TestVarPurgeCommand_Fails(t *testing.T) { cmd := &VarPurgeCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"some", "bad", "args"}) out := ui.ErrorWriter.String() - must.One(t, code) - must.StrContains(t, out, commandErrorText(cmd)) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Contains(t, out, commandErrorText(cmd), "expected help output, got: %s", out) }) t.Run("bad_address", func(t *testing.T) { ci.Parallel(t) ui := cli.NewMockUi() cmd := &VarPurgeCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"-address=nope", "foo"}) - must.One(t, code) - must.StrContains(t, ui.ErrorWriter.String(), "purging variable") - must.Eq(t, "", ui.OutputWriter.String()) + out := ui.ErrorWriter.String() + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Contains(t, ui.ErrorWriter.String(), "purging variable", "connection error, got: %s", out) + require.Zero(t, ui.OutputWriter.String()) }) t.Run("bad_check_index/syntax", func(t *testing.T) { ci.Parallel(t) @@ -45,9 +46,9 @@ func TestVarPurgeCommand_Fails(t *testing.T) { cmd := &VarPurgeCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-check-index=a`, "foo"}) out :=
strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, `Invalid -check-index value "a": not parsable as uint64`, out) - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, `Invalid -check-index value "a": not parsable as uint64`, out) + require.Zero(t, ui.OutputWriter.String()) }) t.Run("bad_check_index/range", func(t *testing.T) { ci.Parallel(t) @@ -55,9 +56,9 @@ func TestVarPurgeCommand_Fails(t *testing.T) { cmd := &VarPurgeCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-check-index=18446744073709551616`, "foo"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, `Invalid -check-index value "18446744073709551616": out of range for uint64`, out) - must.Eq(t, "", ui.OutputWriter.String()) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, `Invalid -check-index value "18446744073709551616": out of range for uint64`, out) + require.Zero(t, ui.OutputWriter.String()) }) } @@ -75,16 +76,16 @@ func TestVarPurgeCommand_Online(t *testing.T) { // Create a var to delete sv := testVariable() _, _, err := client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { _, _ = client.Variables().Delete(sv.Path, nil) }) // Delete the variable code := cmd.Run([]string{"-address=" + url, sv.Path}) - must.Zero(t, code) + require.Equal(t, 0, code, "expected exit 0, got: %d; %v", code, ui.ErrorWriter.String()) vars, _, err := client.Variables().List(nil) - must.NoError(t, err) - must.SliceEmpty(t, vars) + require.NoError(t, err) + require.Len(t, vars, 0) }) t.Run("unchecked", func(t *testing.T) { @@ -95,20 +96,20 @@ func TestVarPurgeCommand_Online(t *testing.T) { // Create a var to delete sv := testVariable() sv, _, err := client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) // Delete a variable code := cmd.Run([]string{"-address=" + url, "-check-index=1", sv.Path}) stderr := ui.ErrorWriter.String() - must.One(t, code) - must.StrContains(t, stderr, "\nCheck-and-Set conflict\n\n Your provided check-index (1)") + require.Equal(t, 1, code, "expected exit 1, got: %d; %v", code, stderr) + require.Contains(t, stderr, "\nCheck-and-Set conflict\n\n Your provided check-index (1)") code = cmd.Run([]string{"-address=" + url, fmt.Sprintf("-check-index=%v", sv.ModifyIndex), sv.Path}) - must.Zero(t, code) + require.Equal(t, 0, code, "expected exit 0, got: %d; %v", code, ui.ErrorWriter.String()) vars, _, err := client.Variables().List(nil) - must.NoError(t, err) - must.SliceEmpty(t, vars) + require.NoError(t, err) + require.Len(t, vars, 0) }) t.Run("autocompleteArgs", func(t *testing.T) { @@ -120,14 +121,14 @@ func TestVarPurgeCommand_Online(t *testing.T) { sv := testVariable() sv.Path = "autocomplete/test" _, _, err := client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) t.Cleanup(func() { client.Variables().Delete(sv.Path, nil) }) args := complete.Args{Last: "aut"} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, sv.Path, res[0]) + require.Equal(t, 1, len(res)) + require.Equal(t, sv.Path, res[0]) }) } diff --git a/command/var_put_test.go b/command/var_put_test.go index a6fd6cd3b0c1..b724f201f504 100644 --- a/command/var_put_test.go +++ b/command/var_put_test.go @@ -16,6 +16,7 @@ import ( "github.com/mitchellh/cli" "github.com/posener/complete" "github.com/shoenig/test/must" + 
"github.com/stretchr/testify/require" ) func TestVarPutCommand_Implements(t *testing.T) { @@ -30,8 +31,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"-bad-flag"}) out := ui.ErrorWriter.String() - must.One(t, code) - must.StrContains(t, out, commandErrorText(cmd)) + require.Equal(t, 1, code, "expected exit code 1, got: %d") + require.Contains(t, out, commandErrorText(cmd), "expected help output, got: %s", out) }) t.Run("bad_address", func(t *testing.T) { ci.Parallel(t) @@ -39,8 +40,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{"-address=nope", "foo", "-"}) out := ui.ErrorWriter.String() - must.One(t, code) - must.StrContains(t, out, "Error creating variable") + require.Equal(t, 1, code, "expected exit code 1, got: %d") + require.Contains(t, out, "Error creating variable", "expected error creating variable, got: %s", out) }) t.Run("missing_template", func(t *testing.T) { ci.Parallel(t) @@ -48,8 +49,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-out=go-template`, "foo", "-"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errMissingTemplate+"\n"+commandErrorText(cmd), out) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errMissingTemplate+"\n"+commandErrorText(cmd), out) }) t.Run("unexpected_template", func(t *testing.T) { ci.Parallel(t) @@ -57,8 +58,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-out=json`, `-template="bad"`, "foo", "-"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errUnexpectedTemplate+"\n"+commandErrorText(cmd), out) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errUnexpectedTemplate+"\n"+commandErrorText(cmd), out) }) t.Run("bad_in", func(t *testing.T) { ci.Parallel(t) @@ -66,8 +67,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-in=bad`, "foo", "-"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errInvalidInFormat+"\n"+commandErrorText(cmd), out) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errInvalidInFormat+"\n"+commandErrorText(cmd), out) }) t.Run("wildcard_namespace", func(t *testing.T) { ci.Parallel(t) @@ -75,8 +76,8 @@ func TestVarPutCommand_Fails(t *testing.T) { cmd := &VarPutCommand{Meta: Meta{Ui: ui}} code := cmd.Run([]string{`-namespace=*`, "foo", "-"}) out := strings.TrimSpace(ui.ErrorWriter.String()) - must.One(t, code) - must.Eq(t, errWildcardNamespaceNotAllowed, out) + require.Equal(t, 1, code, "expected exit code 1, got: %d", code) + require.Equal(t, errWildcardNamespaceNotAllowed, out) }) } @@ -92,7 +93,7 @@ func TestVarPutCommand_GoodJson(t *testing.T) { // Get the variable code := cmd.Run([]string{"-address=" + url, "-out=json", "test/var", "k1=v1", "k2=v2"}) - must.Zero(t, code) + require.Equal(t, 0, code, "expected exit 0, got: %d; %v", code, ui.ErrorWriter.String()) t.Cleanup(func() { _, _ = client.Variables().Delete("test/var", nil) @@ -101,10 +102,10 @@ func TestVarPutCommand_GoodJson(t *testing.T) { var outVar api.Variable b := ui.OutputWriter.Bytes() err := json.Unmarshal(b, &outVar) - must.NoError(t, err) - must.Eq(t, "default", outVar.Namespace) - must.Eq(t, "test/var", 
outVar.Path) - must.Eq(t, api.VariableItems{"k1": "v1", "k2": "v2"}, outVar.Items) + require.NoError(t, err, "error unmarshaling json: %v\nb: %s", err, b) + require.Equal(t, "default", outVar.Namespace) + require.Equal(t, "test/var", outVar.Path) + require.Equal(t, api.VariableItems{"k1": "v1", "k2": "v2"}, outVar.Items) } func TestVarPutCommand_FlagsWithSpec(t *testing.T) { @@ -152,14 +153,14 @@ func TestVarPutCommand_AutocompleteArgs(t *testing.T) { // Create a var sv := testVariable() _, _, err := client.Variables().Create(sv, nil) - must.NoError(t, err) + require.NoError(t, err) args := complete.Args{Last: "t"} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, sv.Path, res[0]) + require.Equal(t, 1, len(res)) + require.Equal(t, sv.Path, res[0]) } func TestVarPutCommand_KeyWarning(t *testing.T) { diff --git a/command/volume_register_test.go b/command/volume_register_test.go index 5a757674b8b0..b816a5b1e4e4 100644 --- a/command/volume_register_test.go +++ b/command/volume_register_test.go @@ -9,7 +9,7 @@ import ( "github.com/hashicorp/hcl" "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestVolumeDispatchParse(t *testing.T) { @@ -35,12 +35,13 @@ rando = "bar" for _, c := range cases { t.Run(c.hcl, func(t *testing.T) { _, s, err := parseVolumeType(c.hcl) - must.Eq(t, c.t, s) + require.Equal(t, c.t, s) if c.err == "" { - must.NoError(t, err) + require.NoError(t, err) } else { - must.ErrorContains(t, err, c.err) + require.Contains(t, err.Error(), c.err) } + }) } } @@ -192,14 +193,16 @@ topology_request { for _, c := range cases { t.Run(c.name, func(t *testing.T) { ast, err := hcl.ParseString(c.hcl) - must.NoError(t, err) + require.NoError(t, err) vol, err := csiDecodeVolume(ast) if c.err == "" { - must.NoError(t, err) + require.NoError(t, err) } else { - must.ErrorContains(t, err, c.err) + require.Contains(t, err.Error(), c.err) } - must.Eq(t, c.expected, vol) + require.Equal(t, c.expected, vol) + }) + } } diff --git a/command/volume_status_test.go b/command/volume_status_test.go index 0fde4610f6b2..d86b81cd9774 100644 --- a/command/volume_status_test.go +++ b/command/volume_status_test.go @@ -11,7 +11,7 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/mitchellh/cli" "github.com/posener/complete" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestCSIVolumeStatusCommand_Implements(t *testing.T) { @@ -26,10 +26,10 @@ func TestCSIVolumeStatusCommand_Fails(t *testing.T) { // Fails on misuse code := cmd.Run([]string{"some", "bad", "args"}) - must.One(t, code) + require.Equal(t, 1, code) out := ui.ErrorWriter.String() - must.StrContains(t, out, commandErrorText(cmd)) + require.Contains(t, out, commandErrorText(cmd)) ui.ErrorWriter.Reset() } @@ -50,13 +50,13 @@ func TestCSIVolumeStatusCommand_AutocompleteArgs(t *testing.T) { PluginID: "glade", } - must.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol})) + require.NoError(t, state.UpsertCSIVolume(1000, []*structs.CSIVolume{vol})) prefix := vol.ID[:len(vol.ID)-5] args := complete.Args{Last: prefix} predictor := cmd.AutocompleteArgs() res := predictor.Predict(args) - must.Len(t, 1, res) - must.Eq(t, vol.ID, res[0]) + require.Equal(t, 1, len(res)) + require.Equal(t, vol.ID, res[0]) } diff --git a/contributing/architecture-eval-triggers.md b/contributing/architecture-eval-triggers.md index af11f129005b..ece5c9aa5735 100644 --- 
a/contributing/architecture-eval-triggers.md +++ b/contributing/architecture-eval-triggers.md @@ -75,7 +75,7 @@ The list below covers each trigger and what can trigger it. * **job-scaling**: Scaling a Job will result in 1 Evaluation created, plus any follow-up Evaluations associated with scheduling, planning, or deployments. * **max-disconnect-timeout**: When an Allocation is in the `unknown` state for - longer than the [`disconnect.lost_after`][] window, the scheduler will create + longer than the [`max_client_disconnect`][] window, the scheduler will create 1 Evaluation. * **reconnect**: When a Node in the `disconnected` state reconnects, Nomad will create 1 Evaluation per job with an allocation on the reconnected Node. @@ -256,4 +256,4 @@ and eventually need to be garbage collected. [`structs.go`]: https://github.com/hashicorp/nomad/blob/v1.4.0-beta.1/nomad/structs/structs.go#L10857-L10875 [`update`]: https://developer.hashicorp.com/nomad/docs/job-specification/update [`restart` attempts]: https://developer.hashicorp.com/nomad/docs/job-specification/restart -[`disconnect.lost_after`]: https://developer.hashicorp.com/nomad/docs/job-specification/disconnect#lost_after +[`max_client_disconnect`]: https://developer.hashicorp.com/nomad/docs/job-specification/group#max-client-disconnect diff --git a/drivers/docker/config.go b/drivers/docker/config.go index f3f9fb69f7a9..699eeffbcb95 100644 --- a/drivers/docker/config.go +++ b/drivers/docker/config.go @@ -18,7 +18,6 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/shared/hclspec" ) @@ -415,7 +414,7 @@ var ( driverCapabilities = &drivers.Capabilities{ SendSignals: true, Exec: true, - FSIsolation: fsisolation.Image, + FSIsolation: drivers.FSIsolationImage, NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup, diff --git a/drivers/docker/cpuset_test.go b/drivers/docker/cpuset_test.go index c4a7ceffb0eb..19cd32e09b62 100644 --- a/drivers/docker/cpuset_test.go +++ b/drivers/docker/cpuset_test.go @@ -1,8 +1,6 @@ // Copyright (c) HashiCorp, Inc. 
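For reference, the `max_client_disconnect` window named by the max-disconnect-timeout trigger above is set at the task group level of the job specification. A minimal sketch, with placeholder group and task names and an illustrative one-hour window:

    group "cache" {
      max_client_disconnect = "1h"

      task "redis" {
        driver = "docker"

        config {
          image = "redis:7"
        }
      }
    }

An allocation on a disconnected client is marked `unknown`; once it has been `unknown` for longer than this window, the scheduler creates the single follow-up Evaluation described above.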
// SPDX-License-Identifier: BUSL-1.1 -//go:build linux - package docker import ( diff --git a/drivers/exec/driver.go b/drivers/exec/driver.go index 072b430a7151..02e4064724f9 100644 --- a/drivers/exec/driver.go +++ b/drivers/exec/driver.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/drivers/utils" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -101,7 +100,7 @@ var ( driverCapabilities = &drivers.Capabilities{ SendSignals: true, Exec: true, - FSIsolation: fsisolation.Chroot, + FSIsolation: drivers.FSIsolationChroot, NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup, diff --git a/drivers/java/driver.go b/drivers/java/driver.go index 829f59f65655..acaeae65108d 100644 --- a/drivers/java/driver.go +++ b/drivers/java/driver.go @@ -22,7 +22,6 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/drivers/utils" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -105,7 +104,7 @@ var ( driverCapabilities = &drivers.Capabilities{ SendSignals: false, Exec: false, - FSIsolation: fsisolation.None, + FSIsolation: drivers.FSIsolationNone, NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup, @@ -118,7 +117,7 @@ var ( func init() { if runtime.GOOS == "linux" { - driverCapabilities.FSIsolation = fsisolation.Chroot + driverCapabilities.FSIsolation = drivers.FSIsolationChroot driverCapabilities.MountConfigs = drivers.MountConfigSupportAll } } @@ -456,7 +455,7 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) (*drivers.TaskHandle, *drive executorConfig := &executor.ExecutorConfig{ LogFile: pluginLogFile, LogLevel: "debug", - FSIsolation: driverCapabilities.FSIsolation == fsisolation.Chroot, + FSIsolation: driverCapabilities.FSIsolation == drivers.FSIsolationChroot, Compute: d.nomadConfig.Topology.Compute(), } diff --git a/drivers/mock/driver.go b/drivers/mock/driver.go index ccd8a5ea6ad6..379df111582d 100644 --- a/drivers/mock/driver.go +++ b/drivers/mock/driver.go @@ -20,7 +20,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) @@ -64,7 +63,7 @@ var ( configSpec = hclspec.NewObject(map[string]*hclspec.Spec{ "fs_isolation": hclspec.NewDefault( hclspec.NewAttr("fs_isolation", "string", false), - hclspec.NewLiteral(fmt.Sprintf("%q", fsisolation.None)), + hclspec.NewLiteral(fmt.Sprintf("%q", drivers.FSIsolationNone)), ), "shutdown_periodic_after": hclspec.NewDefault( hclspec.NewAttr("shutdown_periodic_after", "bool", false), diff --git a/drivers/mock/driver_test.go b/drivers/mock/driver_test.go index 3c7f6fc1e46d..cf999b7812ac 100644 --- a/drivers/mock/driver_test.go +++ b/drivers/mock/driver_test.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" basePlug 
"github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" dtestutil "github.com/hashicorp/nomad/plugins/drivers/testutils" ) @@ -133,13 +132,13 @@ func mkTestAllocDir(t *testing.T, h *dtestutil.DriverHarness, logger hclog.Logge dir, err := os.MkdirTemp("", "nomad_driver_harness-") must.NoError(t, err) - allocDir := allocdir.NewAllocDir(logger, dir, dir, tc.AllocID) + allocDir := allocdir.NewAllocDir(logger, dir, tc.AllocID) must.NoError(t, allocDir.Build()) tc.AllocDir = allocDir.AllocDir taskDir := allocDir.NewTaskDir(tc.Name) - must.NoError(t, taskDir.Build(fsisolation.None, ci.TinyChroot, tc.User)) + must.NoError(t, taskDir.Build(false, ci.TinyChroot)) task := &structs.Task{ Name: tc.Name, diff --git a/drivers/qemu/driver.go b/drivers/qemu/driver.go index 57c99522a6ef..4763f2b44ff6 100644 --- a/drivers/qemu/driver.go +++ b/drivers/qemu/driver.go @@ -24,7 +24,6 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) @@ -103,7 +102,7 @@ var ( capabilities = &drivers.Capabilities{ SendSignals: false, Exec: false, - FSIsolation: fsisolation.Image, + FSIsolation: drivers.FSIsolationImage, NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup, diff --git a/drivers/rawexec/driver.go b/drivers/rawexec/driver.go index b2efc83eb61c..4b734ec07f82 100644 --- a/drivers/rawexec/driver.go +++ b/drivers/rawexec/driver.go @@ -8,7 +8,9 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strconv" + "syscall" "time" "github.com/hashicorp/consul-template/signals" @@ -19,7 +21,6 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/loader" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" ) @@ -60,6 +61,9 @@ func PluginLoader(opts map[string]string) (map[string]interface{}, error) { if v, err := strconv.ParseBool(opts["driver.raw_exec.enable"]); err == nil { conf["enabled"] = v } + if v, err := strconv.ParseBool(opts["driver.raw_exec.no_cgroups"]); err == nil { + conf["no_cgroups"] = v + } return conf, nil } @@ -78,6 +82,10 @@ var ( hclspec.NewAttr("enabled", "bool", false), hclspec.NewLiteral("false"), ), + "no_cgroups": hclspec.NewDefault( + hclspec.NewAttr("no_cgroups", "bool", false), + hclspec.NewLiteral("false"), + ), }) // taskConfigSpec is the hcl specification for the driver config section of @@ -92,7 +100,7 @@ var ( capabilities = &drivers.Capabilities{ SendSignals: true, Exec: true, - FSIsolation: fsisolation.None, + FSIsolation: drivers.FSIsolationNone, NetIsolationModes: []drivers.NetIsolationMode{ drivers.NetIsolationModeHost, drivers.NetIsolationModeGroup, @@ -131,6 +139,10 @@ type Driver struct { // Config is the driver configuration set by the SetConfig RPC call type Config struct { + // NoCgroups tracks whether we should use a cgroup to manage the process + // tree + NoCgroups bool `codec:"no_cgroups"` + // Enabled is set to true to enable the raw_exec driver Enabled bool `codec:"enabled"` } @@ -324,16 +336,21 @@ func (d *Driver) StartTask(cfg *drivers.TaskConfig) 
(*drivers.TaskHandle, *drive return nil, nil, fmt.Errorf("failed to create executor: %v", err) } + // Only use cgroups when running as root on linux - Doing so in other cases + // will cause an error. + useCgroups := !d.config.NoCgroups && runtime.GOOS == "linux" && syscall.Geteuid() == 0 + execCmd := &executor.ExecCommand{ - Cmd: driverConfig.Command, - Args: driverConfig.Args, - Env: cfg.EnvList(), - User: cfg.User, - TaskDir: cfg.TaskDir().Dir, - StdoutPath: cfg.StdoutPath, - StderrPath: cfg.StderrPath, - NetworkIsolation: cfg.NetworkIsolation, - Resources: cfg.Resources.Copy(), + Cmd: driverConfig.Command, + Args: driverConfig.Args, + Env: cfg.EnvList(), + User: cfg.User, + BasicProcessCgroup: useCgroups, + TaskDir: cfg.TaskDir().Dir, + StdoutPath: cfg.StdoutPath, + StderrPath: cfg.StderrPath, + NetworkIsolation: cfg.NetworkIsolation, + Resources: cfg.Resources.Copy(), } ps, err := exec.Launch(execCmd) diff --git a/drivers/rawexec/driver_test.go b/drivers/rawexec/driver_test.go index 35a60fc2b2ae..b2315969a5e3 100644 --- a/drivers/rawexec/driver_test.go +++ b/drivers/rawexec/driver_test.go @@ -118,6 +118,15 @@ func TestRawExecDriver_SetConfig(t *testing.T) { // Enable raw_exec, but disable cgroups. config.Enabled = true + config.NoCgroups = true + data = []byte{} + require.NoError(basePlug.MsgPackEncode(&data, config)) + bconfig.PluginConfig = data + require.NoError(harness.SetConfig(bconfig)) + require.Exactly(config, d.(*Driver).config) + + // Enable raw_exec, enable cgroups. + config.NoCgroups = false data = []byte{} require.NoError(basePlug.MsgPackEncode(&data, config)) bconfig.PluginConfig = data @@ -245,7 +254,8 @@ func TestRawExecDriver_StartWaitRecoverWaitStop(t *testing.T) { harness := dtestutil.NewDriverHarness(t, d) defer harness.Kill() - config := &Config{Enabled: true} + // Disable cgroups so test works without root + config := &Config{NoCgroups: true, Enabled: true} var data []byte require.NoError(basePlug.MsgPackEncode(&data, config)) bconfig := &basePlug.Config{ diff --git a/drivers/rawexec/driver_unix_test.go b/drivers/rawexec/driver_unix_test.go index a66acbf9605d..55f5212a4c67 100644 --- a/drivers/rawexec/driver_unix_test.go +++ b/drivers/rawexec/driver_unix_test.go @@ -148,7 +148,7 @@ func TestRawExecDriver_StartWaitStop(t *testing.T) { harness := dtestutil.NewDriverHarness(t, d) defer harness.Kill() - config := &Config{Enabled: true} + config := &Config{NoCgroups: false, Enabled: true} var data []byte require.NoError(base.MsgPackEncode(&data, config)) bconfig := &base.Config{ diff --git a/drivers/shared/executor/executor.go b/drivers/shared/executor/executor.go index bbb00571777c..e4450fc77b0f 100644 --- a/drivers/shared/executor/executor.go +++ b/drivers/shared/executor/executor.go @@ -129,6 +129,11 @@ type ExecCommand struct { // executor. ResourceLimits bool + // Cgroup marks whether we put the process in a cgroup. Setting this field + // doesn't enforce resource limits. To enforce limits, set ResourceLimits. + // Using the cgroup does allow more precise cleanup of processes. 
+ BasicProcessCgroup bool + // NoPivotRoot disables using pivot_root for isolation, useful when the root // partition is on a ramdisk which does not support pivot_root, // see man 2 pivot_root diff --git a/drivers/shared/executor/executor_linux_test.go b/drivers/shared/executor/executor_linux_test.go index 70de4b21086e..cd7661032ccf 100644 --- a/drivers/shared/executor/executor_linux_test.go +++ b/drivers/shared/executor/executor_linux_test.go @@ -23,7 +23,6 @@ import ( "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" tu "github.com/hashicorp/nomad/testutil" lconfigs "github.com/opencontainers/runc/libcontainer/configs" "github.com/opencontainers/runc/libcontainer/devices" @@ -71,11 +70,11 @@ func testExecutorCommandWithChroot(t *testing.T) *testExecCmd { task := alloc.Job.TaskGroups[0].Tasks[0] taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").Build() - allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), os.TempDir(), os.TempDir(), alloc.ID) + allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), os.TempDir(), alloc.ID) if err := allocDir.Build(); err != nil { t.Fatalf("AllocDir.Build() failed: %v", err) } - if err := allocDir.NewTaskDir(task.Name).Build(fsisolation.Chroot, chrootEnv, task.User); err != nil { + if err := allocDir.NewTaskDir(task.Name).Build(true, chrootEnv); err != nil { allocDir.Destroy() t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err) } diff --git a/drivers/shared/executor/executor_test.go b/drivers/shared/executor/executor_test.go index fa18b0abb3ac..56f3febbd252 100644 --- a/drivers/shared/executor/executor_test.go +++ b/drivers/shared/executor/executor_test.go @@ -30,7 +30,6 @@ import ( "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" "github.com/hashicorp/nomad/plugins/drivers" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" tu "github.com/hashicorp/nomad/testutil" ps "github.com/mitchellh/go-ps" "github.com/shoenig/test/must" @@ -76,11 +75,11 @@ func testExecutorCommand(t *testing.T) *testExecCmd { task := alloc.Job.TaskGroups[0].Tasks[0] taskEnv := taskenv.NewBuilder(mock.Node(), alloc, task, "global").Build() - allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), t.TempDir(), t.TempDir(), alloc.ID) + allocDir := allocdir.NewAllocDir(testlog.HCLogger(t), t.TempDir(), alloc.ID) if err := allocDir.Build(); err != nil { t.Fatalf("AllocDir.Build() failed: %v", err) } - if err := allocDir.NewTaskDir(task.Name).Build(fsisolation.None, nil, task.User); err != nil { + if err := allocDir.NewTaskDir(task.Name).Build(false, nil); err != nil { allocDir.Destroy() t.Fatalf("allocDir.NewTaskDir(%q) failed: %v", task.Name, err) } @@ -648,9 +647,9 @@ func TestExecutor_Start_NonExecutableBinaries(t *testing.T) { // need to configure path in chroot with that file if using isolation executor if _, ok := executor.(*UniversalExecutor); !ok { taskName := filepath.Base(testExecCmd.command.TaskDir) - err := allocDir.NewTaskDir(taskName).Build(fsisolation.Chroot, map[string]string{ + err := allocDir.NewTaskDir(taskName).Build(true, map[string]string{ tmpDir: tmpDir, - }, "nobody") + }) require.NoError(err) } diff --git a/drivers/shared/executor/grpc_client.go b/drivers/shared/executor/grpc_client.go index a25a0aba28c8..b13fe04b8bd8 100644 --- a/drivers/shared/executor/grpc_client.go +++ b/drivers/shared/executor/grpc_client.go @@ -36,22 +36,23 @@ type grpcExecutorClient 
struct { func (c *grpcExecutorClient) Launch(cmd *ExecCommand) (*ProcessState, error) { ctx := context.Background() req := &proto.LaunchRequest{ - Cmd: cmd.Cmd, - Args: cmd.Args, - Resources: drivers.ResourcesToProto(cmd.Resources), - StdoutPath: cmd.StdoutPath, - StderrPath: cmd.StderrPath, - Env: cmd.Env, - User: cmd.User, - TaskDir: cmd.TaskDir, - ResourceLimits: cmd.ResourceLimits, - NoPivotRoot: cmd.NoPivotRoot, - Mounts: drivers.MountsToProto(cmd.Mounts), - Devices: drivers.DevicesToProto(cmd.Devices), - NetworkIsolation: drivers.NetworkIsolationSpecToProto(cmd.NetworkIsolation), - DefaultPidMode: cmd.ModePID, - DefaultIpcMode: cmd.ModeIPC, - Capabilities: cmd.Capabilities, + Cmd: cmd.Cmd, + Args: cmd.Args, + Resources: drivers.ResourcesToProto(cmd.Resources), + StdoutPath: cmd.StdoutPath, + StderrPath: cmd.StderrPath, + Env: cmd.Env, + User: cmd.User, + TaskDir: cmd.TaskDir, + ResourceLimits: cmd.ResourceLimits, + BasicProcessCgroup: cmd.BasicProcessCgroup, + NoPivotRoot: cmd.NoPivotRoot, + Mounts: drivers.MountsToProto(cmd.Mounts), + Devices: drivers.DevicesToProto(cmd.Devices), + NetworkIsolation: drivers.NetworkIsolationSpecToProto(cmd.NetworkIsolation), + DefaultPidMode: cmd.ModePID, + DefaultIpcMode: cmd.ModeIPC, + Capabilities: cmd.Capabilities, } resp, err := c.client.Launch(ctx, req) if err != nil { diff --git a/drivers/shared/executor/grpc_server.go b/drivers/shared/executor/grpc_server.go index 2e5530bbe92c..d6c7413d4bbe 100644 --- a/drivers/shared/executor/grpc_server.go +++ b/drivers/shared/executor/grpc_server.go @@ -25,22 +25,23 @@ type grpcExecutorServer struct { func (s *grpcExecutorServer) Launch(ctx context.Context, req *proto.LaunchRequest) (*proto.LaunchResponse, error) { ps, err := s.impl.Launch(&ExecCommand{ - Cmd: req.Cmd, - Args: req.Args, - Resources: drivers.ResourcesFromProto(req.Resources), - StdoutPath: req.StdoutPath, - StderrPath: req.StderrPath, - Env: req.Env, - User: req.User, - TaskDir: req.TaskDir, - ResourceLimits: req.ResourceLimits, - NoPivotRoot: req.NoPivotRoot, - Mounts: drivers.MountsFromProto(req.Mounts), - Devices: drivers.DevicesFromProto(req.Devices), - NetworkIsolation: drivers.NetworkIsolationSpecFromProto(req.NetworkIsolation), - ModePID: req.DefaultPidMode, - ModeIPC: req.DefaultIpcMode, - Capabilities: req.Capabilities, + Cmd: req.Cmd, + Args: req.Args, + Resources: drivers.ResourcesFromProto(req.Resources), + StdoutPath: req.StdoutPath, + StderrPath: req.StderrPath, + Env: req.Env, + User: req.User, + TaskDir: req.TaskDir, + ResourceLimits: req.ResourceLimits, + BasicProcessCgroup: req.BasicProcessCgroup, + NoPivotRoot: req.NoPivotRoot, + Mounts: drivers.MountsFromProto(req.Mounts), + Devices: drivers.DevicesFromProto(req.Devices), + NetworkIsolation: drivers.NetworkIsolationSpecFromProto(req.NetworkIsolation), + ModePID: req.DefaultPidMode, + ModeIPC: req.DefaultIpcMode, + Capabilities: req.Capabilities, }) if err != nil { diff --git a/drivers/shared/executor/proto/executor.pb.go b/drivers/shared/executor/proto/executor.pb.go index b75694a19c8c..71d10ab52759 100644 --- a/drivers/shared/executor/proto/executor.pb.go +++ b/drivers/shared/executor/proto/executor.pb.go @@ -36,7 +36,7 @@ type LaunchRequest struct { User string `protobuf:"bytes,7,opt,name=user,proto3" json:"user,omitempty"` TaskDir string `protobuf:"bytes,8,opt,name=task_dir,json=taskDir,proto3" json:"task_dir,omitempty"` ResourceLimits bool `protobuf:"varint,9,opt,name=resource_limits,json=resourceLimits,proto3" json:"resource_limits,omitempty"` - BasicProcessCgroup 
bool `protobuf:"varint,10,opt,name=basic_process_cgroup,json=basicProcessCgroup,proto3" json:"basic_process_cgroup,omitempty"` // Deprecated: Do not use. + BasicProcessCgroup bool `protobuf:"varint,10,opt,name=basic_process_cgroup,json=basicProcessCgroup,proto3" json:"basic_process_cgroup,omitempty"` Mounts []*proto1.Mount `protobuf:"bytes,11,rep,name=mounts,proto3" json:"mounts,omitempty"` Devices []*proto1.Device `protobuf:"bytes,12,rep,name=devices,proto3" json:"devices,omitempty"` NetworkIsolation *proto1.NetworkIsolationSpec `protobuf:"bytes,13,opt,name=network_isolation,json=networkIsolation,proto3" json:"network_isolation,omitempty"` @@ -139,7 +139,6 @@ func (m *LaunchRequest) GetResourceLimits() bool { return false } -// Deprecated: Do not use. func (m *LaunchRequest) GetBasicProcessCgroup() bool { if m != nil { return m.BasicProcessCgroup @@ -883,75 +882,75 @@ func init() { } var fileDescriptor_66b85426380683f3 = []byte{ - // 1083 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xff, 0x6e, 0x1b, 0x45, - 0x10, 0xee, 0xc5, 0x89, 0x7f, 0x8c, 0xed, 0xc4, 0x5d, 0x50, 0xb8, 0x1a, 0xa1, 0x9a, 0x43, 0xa2, - 0x16, 0x94, 0x4b, 0x94, 0xa6, 0x2d, 0x12, 0x12, 0x45, 0x4d, 0x0a, 0xaa, 0x48, 0xa3, 0xe8, 0x52, - 0xa8, 0xc4, 0x1f, 0x1c, 0x9b, 0xbb, 0xad, 0xbd, 0xca, 0xf9, 0xf6, 0xd8, 0xdd, 0x73, 0x82, 0x84, - 0xc4, 0x4b, 0x80, 0xc4, 0x03, 0xf0, 0x20, 0x3c, 0x1a, 0xda, 0x5f, 0x17, 0x3b, 0x2d, 0xd5, 0xb9, - 0x88, 0xbf, 0x7c, 0x3b, 0xfe, 0xbe, 0x99, 0xd9, 0x9d, 0x99, 0x6f, 0xe0, 0x6e, 0xca, 0xe9, 0x9c, - 0x70, 0xb1, 0x23, 0xa6, 0x98, 0x93, 0x74, 0x87, 0x5c, 0x92, 0xa4, 0x94, 0x8c, 0xef, 0x14, 0x9c, - 0x49, 0x56, 0x1d, 0x43, 0x7d, 0x44, 0x1f, 0x4f, 0xb1, 0x98, 0xd2, 0x84, 0xf1, 0x22, 0xcc, 0xd9, - 0x0c, 0xa7, 0x61, 0x91, 0x95, 0x13, 0x9a, 0x8b, 0x70, 0x19, 0x37, 0xbc, 0x3d, 0x61, 0x6c, 0x92, - 0x11, 0xe3, 0xe4, 0xac, 0x7c, 0xb9, 0x23, 0xe9, 0x8c, 0x08, 0x89, 0x67, 0x85, 0x05, 0x04, 0x96, - 0xb8, 0xe3, 0xc2, 0x9b, 0x70, 0xe6, 0x64, 0x30, 0xc1, 0xdf, 0x4d, 0xe8, 0x1f, 0xe1, 0x32, 0x4f, - 0xa6, 0x11, 0xf9, 0xb9, 0x24, 0x42, 0xa2, 0x01, 0x34, 0x92, 0x59, 0xea, 0x7b, 0x23, 0x6f, 0xdc, - 0x89, 0xd4, 0x27, 0x42, 0xb0, 0x8e, 0xf9, 0x44, 0xf8, 0x6b, 0xa3, 0xc6, 0xb8, 0x13, 0xe9, 0x6f, - 0x74, 0x0c, 0x1d, 0x4e, 0x04, 0x2b, 0x79, 0x42, 0x84, 0xdf, 0x18, 0x79, 0xe3, 0xee, 0xde, 0x6e, - 0xf8, 0x6f, 0x89, 0xdb, 0xf8, 0x26, 0x64, 0x18, 0x39, 0x5e, 0x74, 0xe5, 0x02, 0xdd, 0x86, 0xae, - 0x90, 0x29, 0x2b, 0x65, 0x5c, 0x60, 0x39, 0xf5, 0xd7, 0x75, 0x74, 0x30, 0xa6, 0x13, 0x2c, 0xa7, - 0x16, 0x40, 0x38, 0x37, 0x80, 0x8d, 0x0a, 0x40, 0x38, 0xd7, 0x80, 0x01, 0x34, 0x48, 0x3e, 0xf7, - 0x9b, 0x3a, 0x49, 0xf5, 0xa9, 0xf2, 0x2e, 0x05, 0xe1, 0x7e, 0x4b, 0x63, 0xf5, 0x37, 0xba, 0x05, - 0x6d, 0x89, 0xc5, 0x79, 0x9c, 0x52, 0xee, 0xb7, 0xb5, 0xbd, 0xa5, 0xce, 0x87, 0x94, 0xa3, 0x3b, - 0xb0, 0xe5, 0xf2, 0x89, 0x33, 0x3a, 0xa3, 0x52, 0xf8, 0x9d, 0x91, 0x37, 0x6e, 0x47, 0x9b, 0xce, - 0x7c, 0xa4, 0xad, 0x68, 0x1f, 0xde, 0x3d, 0xc3, 0x82, 0x26, 0x71, 0xc1, 0x59, 0x42, 0x84, 0x88, - 0x93, 0x09, 0x67, 0x65, 0xe1, 0x83, 0x42, 0x3f, 0x5e, 0xf3, 0xbd, 0x08, 0xe9, 0xff, 0x4f, 0xcc, - 0xdf, 0x07, 0xfa, 0x5f, 0x74, 0x08, 0xcd, 0x19, 0x2b, 0x73, 0x29, 0xfc, 0xee, 0xa8, 0x31, 0xee, - 0xee, 0xdd, 0xad, 0xf9, 0x5c, 0xcf, 0x14, 0x29, 0xb2, 0x5c, 0xf4, 0x0d, 0xb4, 0x52, 0x32, 0xa7, - 0xea, 0xd5, 0x7b, 0xda, 0xcd, 0x67, 0x35, 0xdd, 0x1c, 0x6a, 0x56, 0xe4, 0xd8, 0x68, 0x0a, 0x37, - 0x73, 0x22, 0x2f, 0x18, 0x3f, 0x8f, 0xa9, 0x60, 0x19, 0x96, 0x94, 0xe5, 0x7e, 0x5f, 0x17, 0xf2, - 0x8b, 0x9a, 
0x2e, 0x8f, 0x0d, 0xff, 0xa9, 0xa3, 0x9f, 0x16, 0x24, 0x89, 0x06, 0xf9, 0x35, 0x2b, - 0x0a, 0xa0, 0x9f, 0xb3, 0xb8, 0xa0, 0x73, 0x26, 0x63, 0xce, 0x98, 0xf4, 0x37, 0xf5, 0xab, 0x76, - 0x73, 0x76, 0xa2, 0x6c, 0x11, 0x63, 0x12, 0x8d, 0x61, 0x90, 0x92, 0x97, 0xb8, 0xcc, 0x64, 0x5c, - 0xd0, 0x34, 0x9e, 0xb1, 0x94, 0xf8, 0x5b, 0xba, 0x3c, 0x9b, 0xd6, 0x7e, 0x42, 0xd3, 0x67, 0x2c, - 0x25, 0x8b, 0x48, 0x5a, 0x24, 0x06, 0x39, 0x58, 0x42, 0x3e, 0x2d, 0x12, 0x8d, 0xfc, 0x08, 0xfa, - 0x49, 0x51, 0x0a, 0x22, 0x5d, 0x7d, 0x6e, 0x6a, 0x58, 0xcf, 0x18, 0x6d, 0x55, 0x3e, 0x00, 0xc0, - 0x59, 0xc6, 0x2e, 0xe2, 0x04, 0x17, 0xc2, 0x47, 0xba, 0x79, 0x3a, 0xda, 0x72, 0x80, 0x0b, 0x81, - 0x02, 0xe8, 0x25, 0xb8, 0xc0, 0x67, 0x34, 0xa3, 0x92, 0x12, 0xe1, 0xbf, 0xa3, 0x01, 0x4b, 0xb6, - 0xe0, 0x27, 0xd8, 0x74, 0x13, 0x24, 0x0a, 0x96, 0x0b, 0x82, 0x8e, 0xa1, 0x65, 0x5b, 0x43, 0x8f, - 0x51, 0x77, 0x6f, 0x3f, 0xac, 0x37, 0xd3, 0xa1, 0x6d, 0x99, 0x53, 0x89, 0x25, 0x89, 0x9c, 0x93, - 0xa0, 0x0f, 0xdd, 0x17, 0x98, 0x4a, 0x3b, 0xa1, 0xc1, 0x8f, 0xd0, 0x33, 0xc7, 0xff, 0x29, 0xdc, - 0x11, 0x6c, 0x9d, 0x4e, 0x4b, 0x99, 0xb2, 0x8b, 0xdc, 0x89, 0xc2, 0x36, 0x34, 0x05, 0x9d, 0xe4, - 0x38, 0xb3, 0xba, 0x60, 0x4f, 0xe8, 0x43, 0xe8, 0x4d, 0x38, 0x4e, 0x48, 0x5c, 0x10, 0x4e, 0x59, - 0xea, 0xaf, 0x8d, 0xbc, 0x71, 0x23, 0xea, 0x6a, 0xdb, 0x89, 0x36, 0x05, 0x08, 0x06, 0x57, 0xde, - 0x4c, 0xc6, 0xc1, 0x14, 0xb6, 0xbf, 0x2b, 0x52, 0x15, 0xb4, 0xd2, 0x02, 0x1b, 0x68, 0x49, 0x57, - 0xbc, 0xff, 0xac, 0x2b, 0xc1, 0x2d, 0x78, 0xef, 0x95, 0x48, 0x36, 0x89, 0x01, 0x6c, 0x7e, 0x4f, - 0xb8, 0xa0, 0xcc, 0xdd, 0x32, 0xf8, 0x14, 0xb6, 0x2a, 0x8b, 0x7d, 0x5b, 0x1f, 0x5a, 0x73, 0x63, - 0xb2, 0x37, 0x77, 0xc7, 0xe0, 0x13, 0xe8, 0xa9, 0x77, 0xab, 0x32, 0x1f, 0x42, 0x9b, 0xe6, 0x92, - 0xf0, 0xb9, 0x7d, 0xa4, 0x46, 0x54, 0x9d, 0x83, 0x17, 0xd0, 0xb7, 0x58, 0xeb, 0xf6, 0x6b, 0xd8, - 0x10, 0xca, 0xb0, 0xe2, 0x15, 0x9f, 0x63, 0x71, 0x6e, 0x1c, 0x19, 0x7a, 0x70, 0x07, 0xfa, 0xa7, - 0xba, 0x12, 0xaf, 0x2f, 0xd4, 0x86, 0x2b, 0x94, 0xba, 0xac, 0x03, 0xda, 0xeb, 0x9f, 0x43, 0xf7, - 0xc9, 0x25, 0x49, 0x1c, 0xf1, 0x01, 0xb4, 0x53, 0x82, 0xd3, 0x8c, 0xe6, 0xc4, 0x26, 0x35, 0x0c, - 0xcd, 0x82, 0x09, 0xdd, 0x82, 0x09, 0x9f, 0xbb, 0x05, 0x13, 0x55, 0x58, 0xb7, 0x2e, 0xd6, 0x5e, - 0x5d, 0x17, 0x8d, 0xab, 0x75, 0x11, 0x1c, 0x40, 0xcf, 0x04, 0xb3, 0xf7, 0xdf, 0x86, 0x26, 0x2b, - 0x65, 0x51, 0x4a, 0x1d, 0xab, 0x17, 0xd9, 0x13, 0x7a, 0x1f, 0x3a, 0xe4, 0x92, 0xca, 0x38, 0x51, - 0x63, 0xbd, 0xa6, 0x6f, 0xd0, 0x56, 0x86, 0x03, 0x96, 0x92, 0xe0, 0x2f, 0x0f, 0x7a, 0x8b, 0x1d, - 0xab, 0x62, 0x17, 0x34, 0xb5, 0x37, 0x55, 0x9f, 0x6f, 0xe4, 0x2f, 0xbc, 0x4d, 0x63, 0xf1, 0x6d, - 0x50, 0x08, 0xeb, 0x6a, 0x75, 0xea, 0xa5, 0xf3, 0xe6, 0x6b, 0x6b, 0x9c, 0xd2, 0x0c, 0xc6, 0x66, - 0xf1, 0x39, 0xcd, 0x32, 0x92, 0xea, 0x4d, 0xd4, 0x8e, 0x3a, 0x8c, 0xcd, 0xbe, 0xd5, 0x86, 0xbd, - 0x3f, 0x3a, 0xd0, 0x7e, 0x62, 0xe7, 0x0c, 0xfd, 0x02, 0x4d, 0x23, 0x0e, 0xe8, 0x7e, 0xdd, 0xa1, - 0x5c, 0x5a, 0xc7, 0xc3, 0x07, 0xab, 0xd2, 0x6c, 0x79, 0x6f, 0x20, 0x01, 0xeb, 0x4a, 0x26, 0xd0, - 0xbd, 0xba, 0x1e, 0x16, 0x34, 0x66, 0xb8, 0xbf, 0x1a, 0xa9, 0x0a, 0xfa, 0x1b, 0xb4, 0xdd, 0xb4, - 0xa3, 0x87, 0x75, 0x7d, 0x5c, 0x53, 0x9b, 0xe1, 0xe7, 0xab, 0x13, 0xab, 0x04, 0x7e, 0xf7, 0x60, - 0xeb, 0xda, 0xc4, 0xa3, 0x2f, 0xeb, 0xfa, 0x7b, 0xbd, 0x28, 0x0d, 0x1f, 0xbd, 0x35, 0xbf, 0x4a, - 0xeb, 0x57, 0x68, 0x59, 0x69, 0x41, 0xb5, 0x2b, 0xba, 0xac, 0x4e, 0xc3, 0x87, 0x2b, 0xf3, 0xaa, - 0xe8, 0x97, 0xb0, 0xa1, 0x65, 0x03, 0xd5, 0x2e, 0xeb, 0xa2, 0xb4, 0x0d, 0xef, 0xaf, 0xc8, 0x72, - 0x71, 0x77, 0x3d, 0xd5, 0xff, 0x46, 
0x77, 0xea, 0xf7, 0xff, 0x92, 0xa0, 0xd5, 0xef, 0xff, 0x6b, - 0xf2, 0xa6, 0xfb, 0x5f, 0x8d, 0x61, 0xfd, 0xfe, 0x5f, 0x90, 0xc3, 0xfa, 0xfd, 0xbf, 0x28, 0x6b, - 0xc1, 0x0d, 0xf4, 0xa7, 0x07, 0x7d, 0x65, 0x3a, 0x95, 0x9c, 0xe0, 0x19, 0xcd, 0x27, 0xe8, 0x51, - 0x4d, 0x6d, 0x57, 0x2c, 0xa3, 0xef, 0x96, 0xe9, 0x52, 0xf9, 0xea, 0xed, 0x1d, 0xb8, 0xb4, 0xc6, - 0xde, 0xae, 0xf7, 0xb8, 0xf5, 0xc3, 0x86, 0x91, 0xb4, 0xa6, 0xfe, 0xb9, 0xf7, 0x4f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x2d, 0xae, 0x48, 0x7d, 0x97, 0x0c, 0x00, 0x00, + // 1079 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0x7d, 0x6f, 0x1b, 0xc5, + 0x13, 0xee, 0xc5, 0x89, 0x5f, 0xc6, 0x76, 0xe2, 0xee, 0xef, 0xa7, 0x70, 0x35, 0x42, 0x35, 0x87, + 0x44, 0x2d, 0x28, 0x97, 0x28, 0x7d, 0x43, 0x42, 0xa2, 0x88, 0xa4, 0xa0, 0x8a, 0x34, 0x8a, 0x2e, + 0x85, 0x4a, 0xfc, 0xc1, 0xb1, 0xb9, 0xdb, 0xda, 0x2b, 0x9f, 0x6f, 0x8f, 0xdd, 0x3d, 0x27, 0x48, + 0x48, 0x7c, 0x09, 0x90, 0xf8, 0x00, 0x7c, 0x0c, 0x3e, 0x1c, 0xda, 0xb7, 0x8b, 0x9d, 0x96, 0xea, + 0x5c, 0xc4, 0x5f, 0xbe, 0x1d, 0x3f, 0xcf, 0xcc, 0xec, 0xce, 0xcc, 0x33, 0x70, 0x37, 0xe5, 0x74, + 0x41, 0xb8, 0xd8, 0x13, 0x53, 0xcc, 0x49, 0xba, 0x47, 0x2e, 0x49, 0x52, 0x4a, 0xc6, 0xf7, 0x0a, + 0xce, 0x24, 0xab, 0x8e, 0xa1, 0x3e, 0xa2, 0x0f, 0xa7, 0x58, 0x4c, 0x69, 0xc2, 0x78, 0x11, 0xe6, + 0x6c, 0x8e, 0xd3, 0xb0, 0xc8, 0xca, 0x09, 0xcd, 0x45, 0xb8, 0x8a, 0x1b, 0xde, 0x9e, 0x30, 0x36, + 0xc9, 0x88, 0x71, 0x72, 0x5e, 0xbe, 0xdc, 0x93, 0x74, 0x4e, 0x84, 0xc4, 0xf3, 0xc2, 0x02, 0x02, + 0x4b, 0xdc, 0x73, 0xe1, 0x4d, 0x38, 0x73, 0x32, 0x98, 0xe0, 0xaf, 0x26, 0xf4, 0x8f, 0x71, 0x99, + 0x27, 0xd3, 0x88, 0xfc, 0x54, 0x12, 0x21, 0xd1, 0x00, 0x1a, 0xc9, 0x3c, 0xf5, 0xbd, 0x91, 0x37, + 0xee, 0x44, 0xea, 0x13, 0x21, 0xd8, 0xc4, 0x7c, 0x22, 0xfc, 0x8d, 0x51, 0x63, 0xdc, 0x89, 0xf4, + 0x37, 0x3a, 0x81, 0x0e, 0x27, 0x82, 0x95, 0x3c, 0x21, 0xc2, 0x6f, 0x8c, 0xbc, 0x71, 0xf7, 0x60, + 0x3f, 0xfc, 0xa7, 0xc4, 0x6d, 0x7c, 0x13, 0x32, 0x8c, 0x1c, 0x2f, 0xba, 0x72, 0x81, 0x6e, 0x43, + 0x57, 0xc8, 0x94, 0x95, 0x32, 0x2e, 0xb0, 0x9c, 0xfa, 0x9b, 0x3a, 0x3a, 0x18, 0xd3, 0x29, 0x96, + 0x53, 0x0b, 0x20, 0x9c, 0x1b, 0xc0, 0x56, 0x05, 0x20, 0x9c, 0x6b, 0xc0, 0x00, 0x1a, 0x24, 0x5f, + 0xf8, 0x4d, 0x9d, 0xa4, 0xfa, 0x54, 0x79, 0x97, 0x82, 0x70, 0xbf, 0xa5, 0xb1, 0xfa, 0x1b, 0xdd, + 0x82, 0xb6, 0xc4, 0x62, 0x16, 0xa7, 0x94, 0xfb, 0x6d, 0x6d, 0x6f, 0xa9, 0xf3, 0x11, 0xe5, 0xe8, + 0x0e, 0xec, 0xb8, 0x7c, 0xe2, 0x8c, 0xce, 0xa9, 0x14, 0x7e, 0x67, 0xe4, 0x8d, 0xdb, 0xd1, 0xb6, + 0x33, 0x1f, 0x6b, 0x2b, 0xda, 0x87, 0xff, 0x9f, 0x63, 0x41, 0x93, 0xb8, 0xe0, 0x2c, 0x21, 0x42, + 0xc4, 0xc9, 0x84, 0xb3, 0xb2, 0xf0, 0x41, 0xa3, 0x91, 0xfe, 0xef, 0xd4, 0xfc, 0x75, 0xa8, 0xff, + 0x41, 0x47, 0xd0, 0x9c, 0xb3, 0x32, 0x97, 0xc2, 0xef, 0x8e, 0x1a, 0xe3, 0xee, 0xc1, 0xdd, 0x9a, + 0x4f, 0xf5, 0x4c, 0x91, 0x22, 0xcb, 0x45, 0x5f, 0x43, 0x2b, 0x25, 0x0b, 0xaa, 0x5e, 0xbc, 0xa7, + 0xdd, 0x7c, 0x52, 0xd3, 0xcd, 0x91, 0x66, 0x45, 0x8e, 0x8d, 0xa6, 0x70, 0x33, 0x27, 0xf2, 0x82, + 0xf1, 0x59, 0x4c, 0x05, 0xcb, 0xb0, 0xa4, 0x2c, 0xf7, 0xfb, 0xba, 0x88, 0x9f, 0xd5, 0x74, 0x79, + 0x62, 0xf8, 0x4f, 0x1d, 0xfd, 0xac, 0x20, 0x49, 0x34, 0xc8, 0xaf, 0x59, 0x51, 0x00, 0xfd, 0x9c, + 0xc5, 0x05, 0x5d, 0x30, 0x19, 0x73, 0xc6, 0xa4, 0xbf, 0xad, 0xdf, 0xa8, 0x9b, 0xb3, 0x53, 0x65, + 0x8b, 0x18, 0x93, 0x68, 0x0c, 0x83, 0x94, 0xbc, 0xc4, 0x65, 0x26, 0xe3, 0x82, 0xa6, 0xf1, 0x9c, + 0xa5, 0xc4, 0xdf, 0xd1, 0xa5, 0xd9, 0xb6, 0xf6, 0x53, 0x9a, 0x3e, 0x63, 0x29, 0x59, 0x46, 0xd2, + 0x22, 0x31, 0xc8, 0xc1, 0x0a, 0xf2, 0x69, 
0x91, 0x68, 0xe4, 0x07, 0xd0, 0x4f, 0x8a, 0x52, 0x10, + 0xe9, 0x6a, 0x73, 0x53, 0xc3, 0x7a, 0xc6, 0x68, 0xab, 0xf2, 0x1e, 0x00, 0xce, 0x32, 0x76, 0x11, + 0x27, 0xb8, 0x10, 0x3e, 0xd2, 0x8d, 0xd3, 0xd1, 0x96, 0x43, 0x5c, 0x08, 0x14, 0x40, 0x2f, 0xc1, + 0x05, 0x3e, 0xa7, 0x19, 0x95, 0x94, 0x08, 0xff, 0x7f, 0x1a, 0xb0, 0x62, 0x0b, 0x7e, 0x84, 0x6d, + 0x37, 0x3d, 0xa2, 0x60, 0xb9, 0x20, 0xe8, 0x04, 0x5a, 0xb6, 0x2d, 0xf4, 0x08, 0x75, 0x0f, 0xee, + 0x87, 0xf5, 0xe6, 0x39, 0xb4, 0x2d, 0x73, 0x26, 0xb1, 0x24, 0x91, 0x73, 0x12, 0xf4, 0xa1, 0xfb, + 0x02, 0x53, 0x69, 0xa7, 0x33, 0xf8, 0x01, 0x7a, 0xe6, 0xf8, 0x1f, 0x85, 0x3b, 0x86, 0x9d, 0xb3, + 0x69, 0x29, 0x53, 0x76, 0x91, 0x3b, 0x41, 0xd8, 0x85, 0xa6, 0xa0, 0x93, 0x1c, 0x67, 0x56, 0x13, + 0xec, 0x09, 0xbd, 0x0f, 0xbd, 0x09, 0xc7, 0x09, 0x89, 0x0b, 0xc2, 0x29, 0x4b, 0xfd, 0x8d, 0x91, + 0x37, 0x6e, 0x44, 0x5d, 0x6d, 0x3b, 0xd5, 0xa6, 0x00, 0xc1, 0xe0, 0xca, 0x9b, 0xc9, 0x38, 0x98, + 0xc2, 0xee, 0xb7, 0x45, 0xaa, 0x82, 0x56, 0x3a, 0x60, 0x03, 0xad, 0x68, 0x8a, 0xf7, 0xaf, 0x35, + 0x25, 0xb8, 0x05, 0xef, 0xbc, 0x12, 0xc9, 0x26, 0x31, 0x80, 0xed, 0xef, 0x08, 0x17, 0x94, 0xb9, + 0x5b, 0x06, 0x1f, 0xc3, 0x4e, 0x65, 0xb1, 0x6f, 0xeb, 0x43, 0x6b, 0x61, 0x4c, 0xf6, 0xe6, 0xee, + 0x18, 0x7c, 0x04, 0x3d, 0xf5, 0x6e, 0x55, 0xe6, 0x43, 0x68, 0xd3, 0x5c, 0x12, 0xbe, 0xb0, 0x8f, + 0xd4, 0x88, 0xaa, 0x73, 0xf0, 0x02, 0xfa, 0x16, 0x6b, 0xdd, 0x7e, 0x05, 0x5b, 0x42, 0x19, 0xd6, + 0xbc, 0xe2, 0x73, 0x2c, 0x66, 0xc6, 0x91, 0xa1, 0x07, 0x77, 0xa0, 0x7f, 0xa6, 0x2b, 0xf1, 0xfa, + 0x42, 0x6d, 0xb9, 0x42, 0xa9, 0xcb, 0x3a, 0xa0, 0xbd, 0xfe, 0x0c, 0xba, 0x4f, 0x2e, 0x49, 0xe2, + 0x88, 0x0f, 0xa1, 0x9d, 0x12, 0x9c, 0x66, 0x34, 0x27, 0x36, 0xa9, 0x61, 0x68, 0x96, 0x4b, 0xe8, + 0x96, 0x4b, 0xf8, 0xdc, 0x2d, 0x97, 0xa8, 0xc2, 0xba, 0x55, 0xb1, 0xf1, 0xea, 0xaa, 0x68, 0x5c, + 0xad, 0x8a, 0xe0, 0x10, 0x7a, 0x26, 0x98, 0xbd, 0xff, 0x2e, 0x34, 0x59, 0x29, 0x8b, 0x52, 0xea, + 0x58, 0xbd, 0xc8, 0x9e, 0xd0, 0xbb, 0xd0, 0x21, 0x97, 0x54, 0xc6, 0x89, 0x1a, 0xeb, 0x0d, 0x7d, + 0x83, 0xb6, 0x32, 0x1c, 0xb2, 0x94, 0x04, 0x7f, 0x7a, 0xd0, 0x5b, 0xee, 0x58, 0x15, 0xbb, 0xa0, + 0xa9, 0xbd, 0xa9, 0xfa, 0x7c, 0x23, 0x7f, 0xe9, 0x6d, 0x1a, 0xcb, 0x6f, 0x83, 0x42, 0xd8, 0x54, + 0x6b, 0x53, 0x2f, 0x9c, 0x37, 0x5f, 0x5b, 0xe3, 0x94, 0x66, 0x30, 0x36, 0x8f, 0x67, 0x34, 0xcb, + 0x48, 0xaa, 0xb7, 0x50, 0x3b, 0xea, 0x30, 0x36, 0xff, 0x46, 0x1b, 0x0e, 0x7e, 0xef, 0x40, 0xfb, + 0x89, 0x9d, 0x33, 0xf4, 0x33, 0x34, 0x8d, 0x38, 0xa0, 0x07, 0x75, 0x87, 0x72, 0x65, 0x15, 0x0f, + 0x1f, 0xae, 0x4b, 0xb3, 0xe5, 0xbd, 0x81, 0x04, 0x6c, 0x2a, 0x99, 0x40, 0xf7, 0xea, 0x7a, 0x58, + 0xd2, 0x98, 0xe1, 0xfd, 0xf5, 0x48, 0x55, 0xd0, 0x5f, 0xa1, 0xed, 0xa6, 0x1d, 0x3d, 0xaa, 0xeb, + 0xe3, 0x9a, 0xda, 0x0c, 0x3f, 0x5d, 0x9f, 0x58, 0x25, 0xf0, 0x9b, 0x07, 0x3b, 0xd7, 0x26, 0x1e, + 0x7d, 0x5e, 0xd7, 0xdf, 0xeb, 0x45, 0x69, 0xf8, 0xf8, 0xad, 0xf9, 0x55, 0x5a, 0xbf, 0x40, 0xcb, + 0x4a, 0x0b, 0xaa, 0x5d, 0xd1, 0x55, 0x75, 0x1a, 0x3e, 0x5a, 0x9b, 0x57, 0x45, 0xbf, 0x84, 0x2d, + 0x2d, 0x1b, 0xa8, 0x76, 0x59, 0x97, 0xa5, 0x6d, 0xf8, 0x60, 0x4d, 0x96, 0x8b, 0xbb, 0xef, 0xa9, + 0xfe, 0x37, 0xba, 0x53, 0xbf, 0xff, 0x57, 0x04, 0xad, 0x7e, 0xff, 0x5f, 0x93, 0x37, 0xdd, 0xff, + 0x6a, 0x0c, 0xeb, 0xf7, 0xff, 0x92, 0x1c, 0xd6, 0xef, 0xff, 0x65, 0x59, 0x0b, 0x6e, 0xa0, 0x3f, + 0x3c, 0xe8, 0x2b, 0xd3, 0x99, 0xe4, 0x04, 0xcf, 0x69, 0x3e, 0x41, 0x8f, 0x6b, 0x6a, 0xbb, 0x62, + 0x19, 0x7d, 0xb7, 0x4c, 0x97, 0xca, 0x17, 0x6f, 0xef, 0xc0, 0xa5, 0x35, 0xf6, 0xf6, 0xbd, 0x2f, + 0x5b, 0xdf, 0x6f, 0x19, 0x49, 0x6b, 0xea, 0x9f, 0x7b, 0x7f, 0x07, 
0x00, 0x00, 0xff, 0xff, 0xd5, + 0xc9, 0x00, 0x0a, 0x93, 0x0c, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/drivers/shared/executor/proto/executor.proto b/drivers/shared/executor/proto/executor.proto index 55d896e71ef7..c2364cfc833a 100644 --- a/drivers/shared/executor/proto/executor.proto +++ b/drivers/shared/executor/proto/executor.proto @@ -40,7 +40,7 @@ message LaunchRequest { string user = 7; string task_dir = 8; bool resource_limits = 9; - bool basic_process_cgroup = 10 [deprecated=true]; + bool basic_process_cgroup = 10; repeated hashicorp.nomad.plugins.drivers.proto.Mount mounts = 11; repeated hashicorp.nomad.plugins.drivers.proto.Device devices = 12; hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec network_isolation = 13; diff --git a/e2e/artifact/input/artifact_limits.nomad b/e2e/artifact/input/artifact_limits.nomad index 48763dc7a2da..87c08517d380 100644 --- a/e2e/artifact/input/artifact_limits.nomad +++ b/e2e/artifact/input/artifact_limits.nomad @@ -25,7 +25,7 @@ job "linux" { task "zip_bomb" { artifact { - source = "https://github.com/hashicorp/go-getter/raw/v1.7.0/testdata/decompress-zip/bomb.zip" + source = "https://github.com/hashicorp/go-getter/blob/v1.7.0/testdata/decompress-zip/bomb.zip" destination = "local/" } diff --git a/e2e/artifact/input/artifact_linux.nomad b/e2e/artifact/input/artifact_linux.nomad index 45be009ee895..0fb4a375a132 100644 --- a/e2e/artifact/input/artifact_linux.nomad +++ b/e2e/artifact/input/artifact_linux.nomad @@ -243,7 +243,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat local/go.mod && sleep 10"] + args = ["cat", "local/go.mod"] } resources { cpu = 16 @@ -260,7 +260,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat local/my/path/go.mod && sleep 10"] + args = ["cat", "local/my/path/go.mod"] } resources { cpu = 16 @@ -278,7 +278,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat ${NOMAD_ALLOC_DIR}/go.mod && sleep 10"] + args = ["cat", "${NOMAD_ALLOC_DIR}/go.mod"] } resources { cpu = 16 @@ -294,7 +294,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat local/go-set-main/go.mod && sleep 10"] + args = ["cat", "local/go-set-main/go.mod"] } resources { cpu = 16 @@ -311,7 +311,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat local/my/zip/go-set-main/go.mod && sleep 10"] + args = ["cat", "local/my/zip/go-set-main/go.mod"] } resources { cpu = 16 @@ -328,7 +328,7 @@ job "linux" { driver = "docker" config { image = "bash:5" - args = ["-c", "cat local/repository/go.mod && sleep 10"] + args = ["cat", "local/repository/go.mod"] } resources { cpu = 16 diff --git a/e2e/connect/client.go b/e2e/connect/client.go index e038e174f76e..366016ee8eed 100644 --- a/e2e/connect/client.go +++ b/e2e/connect/client.go @@ -41,8 +41,6 @@ func (tc *ConnectClientStateE2ETest) AfterEach(f *framework.F) { func (tc *ConnectClientStateE2ETest) TestClientRestart(f *framework.F) { t := f.T() - t.Skip("skipping test that does nomad agent restart") - jobID := "connect" + uuid.Generate()[0:8] tc.jobIDs = append(tc.jobIDs, jobID) client := tc.Nomad() diff --git a/e2e/consul/input/consul_wi.nomad.hcl b/e2e/consul/input/consul_wi.nomad.hcl deleted file mode 100644 index ff089aaacc20..000000000000 --- a/e2e/consul/input/consul_wi.nomad.hcl +++ /dev/null @@ -1,62 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -job "example" { - - constraint { - attribute = "${attr.kernel.name}" - value = "linux" - } - - group "example" { - network { - port "db" { - to = 5678 - } - } - - task "example" { - driver = "docker" - - config { - image = "busybox:1" - command = "nc" - args = ["-ll", "-p", "5678", "-e", "/bin/cat"] - - ports = ["db"] - } - - identity { - name = "consul_default" - aud = ["consul.io"] - } - - consul {} - - template { - data = <<-EOT - CONSUL_TOKEN={{ env "CONSUL_TOKEN" }} - EOT - destination = "local/config.txt" - } - - resources { - cpu = 100 - memory = 100 - } - - service { - name = "consul-example" - tags = ["global", "cache"] - port = "db" - - check { - name = "alive" - type = "tcp" - interval = "10s" - timeout = "2s" - } - } - } - } -} diff --git a/e2e/consul/namespaces.go b/e2e/consul/namespaces.go index 7d1fbac25fbf..39602c69afa1 100644 --- a/e2e/consul/namespaces.go +++ b/e2e/consul/namespaces.go @@ -11,7 +11,7 @@ import ( capi "github.com/hashicorp/consul/api" "github.com/hashicorp/nomad/e2e/e2eutil" "github.com/hashicorp/nomad/e2e/framework" - "github.com/shoenig/test/must" + "github.com/hashicorp/nomad/helper" "github.com/stretchr/testify/require" ) @@ -94,8 +94,7 @@ func (tc *ConsulNamespacesE2ETest) AfterAll(f *framework.F) { func (tc *ConsulNamespacesE2ETest) TestNamespacesExist(f *framework.F) { // make sure our namespaces exist + default namespaces := e2eutil.ListConsulNamespaces(f.T(), tc.Consul()) - must.SliceContainsSubset(f.T(), namespaces, allConsulNamespaces, must.Sprintf( - "expected %+v to be a subset of: %+v", allConsulNamespaces, namespaces)) + require.True(f.T(), helper.SliceSetEq(namespaces, append(consulNamespaces, "default"))) } func (tc *ConsulNamespacesE2ETest) testConsulRegisterGroupServices(f *framework.F, token, nsA, nsB, nsC, nsZ string) { diff --git a/e2e/csi/ebs.go b/e2e/csi/ebs.go index be998d9fc96b..0e044527b645 100644 --- a/e2e/csi/ebs.go +++ b/e2e/csi/ebs.go @@ -4,9 +4,7 @@ package csi import ( - "context" "fmt" - "os/exec" "time" "github.com/hashicorp/nomad/e2e/e2eutil" @@ -197,12 +195,8 @@ func (tc *CSIControllerPluginEBSTest) TestVolumeClaim(f *framework.F) { // TestSnapshot exercises the snapshot commands. func (tc *CSIControllerPluginEBSTest) TestSnapshot(f *framework.F) { - // EBS snapshots can take a very long time to run - ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) - defer cancel() - bytes, err := exec.CommandContext(ctx, "nomad", "volume", "snapshot", "create", - tc.volumeIDs[0], "snap-"+tc.uuid).CombinedOutput() - out := string(bytes) + out, err := e2eutil.Command("nomad", "volume", "snapshot", "create", + tc.volumeIDs[0], "snap-"+tc.uuid) requireNoErrorElseDump(f, err, "could not create volume snapshot", tc.pluginJobIDs) snaps, err := e2eutil.ParseColumns(out) diff --git a/e2e/e2eutil/consul.go b/e2e/e2eutil/consul.go index 5fc6aaa38d75..2a25ca54aa31 100644 --- a/e2e/e2eutil/consul.go +++ b/e2e/e2eutil/consul.go @@ -39,7 +39,7 @@ func serviceStatus(require *require.Assertions, client *capi.Client, namespace, // RequireConsulDeregistered asserts that the service eventually is de-registered from Consul. 
func RequireConsulDeregistered(require *require.Assertions, client *capi.Client, namespace, service string) { - testutil.WaitForResultRetries(10, func() (bool, error) { + testutil.WaitForResultRetries(5, func() (bool, error) { defer time.Sleep(time.Second) services, _, err := client.Health().Service(service, "", false, &capi.QueryOptions{Namespace: namespace}) @@ -203,7 +203,7 @@ func DeleteConsulPolicies(t *testing.T, client *capi.Client, policies map[string // the given policyID in the specified namespace. // // Requires Consul Enterprise. -func CreateConsulToken(t *testing.T, client *capi.Client, namespace, policyID string) (secret, accessor string) { +func CreateConsulToken(t *testing.T, client *capi.Client, namespace, policyID string) string { aclClient := client.ACL() opts := &capi.WriteOptions{Namespace: namespace} @@ -212,7 +212,7 @@ func CreateConsulToken(t *testing.T, client *capi.Client, namespace, policyID st Description: "An e2e test token", }, opts) require.NoError(t, err, "failed to create consul acl token") - return token.SecretID, token.AccessorID + return token.SecretID } // DeleteConsulTokens is used to delete a set of tokens from Consul. diff --git a/e2e/e2eutil/utils.go b/e2e/e2eutil/utils.go index 5e24d658e16c..bf17765cebb2 100644 --- a/e2e/e2eutil/utils.go +++ b/e2e/e2eutil/utils.go @@ -209,8 +209,14 @@ func WaitForAllocNotPending(t *testing.T, nomadClient *api.Client, allocID strin // WaitForJobStopped stops a job and waits for all of its allocs to terminate. func WaitForJobStopped(t *testing.T, nomadClient *api.Client, job string) { - _, _, err := nomadClient.Jobs().Deregister(job, true, nil) + allocs, _, err := nomadClient.Jobs().Allocations(job, true, nil) + require.NoError(t, err, "error getting allocations for job %q", job) + ids := AllocIDsFromAllocationListStubs(allocs) + _, _, err = nomadClient.Jobs().Deregister(job, true, nil) require.NoError(t, err, "error deregistering job %q", job) + for _, id := range ids { + WaitForAllocStopped(t, nomadClient, id) + } } func WaitForAllocsStopped(t *testing.T, nomadClient *api.Client, allocIDs []string) { diff --git a/e2e/isolation/input/chroot_docker.nomad b/e2e/isolation/input/chroot_docker.nomad index 995000a38ee0..b4ff10d7bbde 100644 --- a/e2e/isolation/input/chroot_docker.nomad +++ b/e2e/isolation/input/chroot_docker.nomad @@ -17,7 +17,7 @@ job "chroot_docker" { args = [ "/bin/sh", "-c", - "echo $NOMAD_ALLOC_DIR; echo $NOMAD_TASK_DIR; echo $NOMAD_SECRETS_DIR; echo $PATH; sleep 2" + "echo $NOMAD_ALLOC_DIR; echo $NOMAD_TASK_DIR; echo $NOMAD_SECRETS_DIR; echo $PATH" ] } resources { diff --git a/e2e/isolation/pids_test.go b/e2e/isolation/pids_test.go index 821e2955b158..088e4da6efd4 100644 --- a/e2e/isolation/pids_test.go +++ b/e2e/isolation/pids_test.go @@ -7,7 +7,6 @@ import ( "regexp" "strings" "testing" - "time" "github.com/hashicorp/nomad/e2e/v3/cluster3" "github.com/hashicorp/nomad/e2e/v3/jobs3" @@ -42,7 +41,6 @@ func testExecNamespacePID(t *testing.T) { job, cleanup := jobs3.Submit(t, "./input/exec.hcl", jobs3.WaitComplete("group"), - jobs3.Timeout(time.Second*30), // exec can be a bit slow ) t.Cleanup(cleanup) @@ -54,7 +52,6 @@ func testExecHostPID(t *testing.T) { job, cleanup := jobs3.Submit(t, "./input/exec_host.hcl", jobs3.WaitComplete("group"), - jobs3.Timeout(time.Second*30), // exec can be a bit slow ) t.Cleanup(cleanup) @@ -65,10 +62,7 @@ func testExecHostPID(t *testing.T) { } func testExecNamespaceAllocExec(t *testing.T) { - job, cleanup := jobs3.Submit(t, - "./input/alloc_exec.hcl", - 
jobs3.Timeout(time.Second*30), // exec can be a bit slow - ) + job, cleanup := jobs3.Submit(t, "./input/alloc_exec.hcl") t.Cleanup(cleanup) logs := job.Exec("group", "sleep", []string{"ps", "ax"}) @@ -82,7 +76,6 @@ func testJavaNamespacePID(t *testing.T) { job, cleanup := jobs3.Submit(t, "./input/java.hcl", jobs3.WaitComplete("group"), - jobs3.Timeout(time.Second*60), // exec prestart + java main ) t.Cleanup(cleanup) @@ -94,7 +87,6 @@ func testJavaHostPID(t *testing.T) { job, cleanup := jobs3.Submit(t, "./input/java_host.hcl", jobs3.WaitComplete("group"), - jobs3.Timeout(time.Second*60), // exec prestart + java main ) t.Cleanup(cleanup) @@ -105,10 +97,7 @@ func testJavaHostPID(t *testing.T) { } func testJavaNamespaceAllocExec(t *testing.T) { - job, cleanup := jobs3.Submit(t, - "./input/alloc_exec_java.hcl", - jobs3.Timeout(time.Second*60), // exec prestart + java main - ) + job, cleanup := jobs3.Submit(t, "./input/alloc_exec_java.hcl") t.Cleanup(cleanup) logs := job.Exec("group", "sleep", []string{"ps", "ax"}) diff --git a/e2e/metrics/metrics_test.go b/e2e/metrics/metrics_test.go index 2eec7d001ca4..e5057d7b3ea1 100644 --- a/e2e/metrics/metrics_test.go +++ b/e2e/metrics/metrics_test.go @@ -41,9 +41,9 @@ func (m *metric) Query() string { } func TestMetrics(t *testing.T) { - // Run via the e2e suite. Requires AWS attributes. + // Run via the e2e suite; requires Windows and AWS specific attributes. - // Wait for the cluster to be ready. + // Wait for the cluster to be ready cluster3.Establish(t, cluster3.Leader(), cluster3.LinuxClients(1), diff --git a/e2e/oversubscription/input/rawexec.hcl b/e2e/oversubscription/input/rawexec.hcl deleted file mode 100644 index c5e26bb33b6a..000000000000 --- a/e2e/oversubscription/input/rawexec.hcl +++ /dev/null @@ -1,38 +0,0 @@ -# Copyright (c) HashiCorp, Inc. 
-# SPDX-License-Identifier: BUSL-1.1 - -job "oversub" { - type = "batch" - - constraint { - attribute = "${attr.kernel.name}" - value = "linux" - } - - group "group" { - reschedule { - attempts = 0 - unlimited = false - } - - restart { - attempts = 0 - mode = "fail" - } - - task "cat" { - driver = "raw_exec" - - config { - command = "bash" - args = ["-c", "cat /sys/fs/cgroup/$(cat /proc/self/cgroup | cut -d':' -f3)/memory.max"] - } - - resources { - cpu = 100 - memory = 64 - memory_max = 128 - } - } - } -} diff --git a/e2e/oversubscription/oversubscription_test.go b/e2e/oversubscription/oversubscription_test.go index e0a5db54d31a..b0c7d11cbc9f 100644 --- a/e2e/oversubscription/oversubscription_test.go +++ b/e2e/oversubscription/oversubscription_test.go @@ -4,9 +4,6 @@ package oversubscription import ( - "fmt" - "regexp" - "strings" "testing" "time" @@ -15,7 +12,6 @@ import ( "github.com/hashicorp/nomad/e2e/v3/cluster3" "github.com/hashicorp/nomad/e2e/v3/jobs3" "github.com/shoenig/test/must" - "github.com/shoenig/test/wait" ) var ( @@ -39,8 +35,6 @@ func TestOversubscription(t *testing.T) { t.Run("testDocker", testDocker) t.Run("testExec", testExec) - t.Run("testRawExec", testRawExec) - t.Run("testRawExecMax", testRawExecMax) } func testDocker(t *testing.T) { @@ -57,41 +51,13 @@ func testExec(t *testing.T) { job, jobCleanup := jobs3.Submit(t, "./input/exec.hcl") t.Cleanup(jobCleanup) - testFunc := func() error { - // job will cat /sys/fs/cgroup/nomad.slice/share.slice/.sleep.scope/memory.max - // which should be set to the 30 megabyte memory_max value - expect := "31457280" - logs := job.TaskLogs("group", "cat") - if !strings.Contains(logs.Stdout, expect) { - return fmt.Errorf("expect '%s' in stdout, got: '%s'", expect, logs.Stdout) - } - return nil - } - - // wait for poststart to run, up to 20 seconds - must.Wait(t, wait.InitialSuccess( - wait.ErrorFunc(testFunc), - wait.Timeout(time.Second*20), - wait.Gap(time.Second*2), - )) -} - -func testRawExec(t *testing.T) { - job, cleanup := jobs3.Submit(t, "./input/rawexec.hcl") - t.Cleanup(cleanup) + // wait for poststart + time.Sleep(10 * time.Second) + // job will cat /sys/fs/cgroup/nomad.slice/share.slice/.sleep.scope/memory.max + // which should be set to the 30 megabyte memory_max value logs := job.TaskLogs("group", "cat") - must.StrContains(t, logs.Stdout, "134217728") // 128 mb memory_max -} - -func testRawExecMax(t *testing.T) { - job, cleanup := jobs3.Submit(t, "./input/rawexecmax.hcl") - t.Cleanup(cleanup) - - // will print memory.low then memory.max - logs := job.TaskLogs("group", "cat") - logsRe := regexp.MustCompile(`67108864\s+max`) - must.RegexMatch(t, logsRe, logs.Stdout) + must.StrContains(t, logs.Stdout, "31457280") } func captureSchedulerConfiguration(t *testing.T) { diff --git a/e2e/oversubscription/input/rawexecmax.hcl b/e2e/rawexec/input/oversubmax.hcl similarity index 100% rename from e2e/oversubscription/input/rawexecmax.hcl rename to e2e/rawexec/input/oversubmax.hcl diff --git a/e2e/rawexec/rawexec_test.go b/e2e/rawexec/rawexec_test.go index 8907e797e16d..6c52a93d8a37 100644 --- a/e2e/rawexec/rawexec_test.go +++ b/e2e/rawexec/rawexec_test.go @@ -4,6 +4,7 @@ package rawexec import ( + "regexp" "testing" "github.com/hashicorp/nomad/e2e/v3/cluster3" @@ -18,6 +19,8 @@ func TestRawExec(t *testing.T) { ) t.Run("testOomAdj", testOomAdj) + t.Run("testOversubMemory", testOversubMemory) + t.Run("testOversubMemoryUnlimited", testOversubMemoryUnlimited) } func testOomAdj(t *testing.T) { @@ -27,3 +30,21 @@ func testOomAdj(t 
*testing.T) { logs := job.TaskLogs("group", "cat") must.StrContains(t, logs.Stdout, "0") } + +func testOversubMemory(t *testing.T) { + job, cleanup := jobs3.Submit(t, "./input/oversub.hcl") + t.Cleanup(cleanup) + + logs := job.TaskLogs("group", "cat") + must.StrContains(t, logs.Stdout, "134217728") // 128 mb memory_max +} + +func testOversubMemoryUnlimited(t *testing.T) { + job, cleanup := jobs3.Submit(t, "./input/oversubmax.hcl") + t.Cleanup(cleanup) + + // will print memory.low then memory.max + logs := job.TaskLogs("group", "cat") + logsRe := regexp.MustCompile(`67108864\s+max`) + must.RegexMatch(t, logsRe, logs.Stdout) +} diff --git a/e2e/terraform/.terraform.lock.hcl b/e2e/terraform/.terraform.lock.hcl index c78e6542511d..80680554cec9 100644 --- a/e2e/terraform/.terraform.lock.hcl +++ b/e2e/terraform/.terraform.lock.hcl @@ -4,10 +4,8 @@ provider "registry.terraform.io/hashicorp/aws" { version = "4.10.0" hashes = [ - "h1:3zeyl8QwNYPXRD4b++0Vo9nBcsL3FXT+DT3x/KJNKB0=", "h1:F9BjbxBhuo1A/rP318IUrkW3TAh29i6UC18qwhzCs6c=", "h1:S6xGPRL08YEuBdemiYZyIBf/YwM4OCvzVuaiuU6kLjc=", - "h1:pjPLizna1qa/CZh7HvLuQ73YmqaunLXatyOqzF2ePEI=", "zh:0a2a7eabfeb7dbb17b7f82aff3fa2ba51e836c15e5be4f5468ea44bd1299b48d", "zh:23409c7205d13d2d68b5528e1c49e0a0455d99bbfec61eb0201142beffaa81f7", "zh:3adad2245d97816f3919778b52c58fb2de130938a3e9081358bfbb72ec478d9a", @@ -26,8 +24,6 @@ provider "registry.terraform.io/hashicorp/aws" { provider "registry.terraform.io/hashicorp/consul" { version = "2.15.1" hashes = [ - "h1:AifunnimRF25th48scFReck5SmYFOCpSQRmfHBrQ4t4=", - "h1:ICBwtLcfVf6Ei3kVakgUR5hUkuZaeBtmdxzltgeDHBo=", "h1:PexyQBRLDA+SR+sWlzYBZswry5O5h/tTfj87CaECtLc=", "h1:XGOBrMc6OQsNpgQtgtV6H0/jYe7yVIYxEDsErV/R6SE=", "zh:1806830a3cf103e65e772a7d28fd4df2788c29a029fb2def1326bc777ad107ed", @@ -48,8 +44,6 @@ provider "registry.terraform.io/hashicorp/consul" { provider "registry.terraform.io/hashicorp/external" { version = "2.2.2" hashes = [ - "h1:/Qsdu8SIXbfANKJFs1UTAfvcomJUalOd3uDZvj3jixA=", - "h1:BKQ5f5ijzeyBSnUr+j0wUi+bYv6KBQVQNDXNRVEcfJE=", "h1:VUkgcWvCliS0HO4kt7oEQhFD2gcx/59XpwMqxfCU1kE=", "h1:e7RpnZ2PbJEEPnfsg7V0FNwbfSk0/Z3FdrLsXINBmDY=", "zh:0b84ab0af2e28606e9c0c1289343949339221c3ab126616b831ddb5aaef5f5ca", @@ -72,8 +66,6 @@ provider "registry.terraform.io/hashicorp/hcp" { hashes = [ "h1:B5O/NawTnKPdUgUlGP/mM2ybv0RcLvVJVOcrivDdFnI=", "h1:C0KoYT09Ff91pE5KzrFrISCE5wQyJaJnxPdA0SXDOzI=", - "h1:f4IwCK9heo5F+k+nRFY/fzG18DesbBcqRL8F4WsKh7Q=", - "h1:fCHcXVlT/MoAqvIUjFyJqtGrz+ebHNCcR1YM2ZSRPxE=", "zh:0fa82a384b25a58b65523e0ea4768fa1212b1f5cfc0c9379d31162454fedcc9d", "zh:6fa5415dbac9c8d20026772dd5aee7dd3ac541e9d86827d0b70bc752472ec76c", "zh:7490212c32339153165aec1dcef063804aac0d3f1cfbdfd3d04d7a60c29b0f40", @@ -93,8 +85,6 @@ provider "registry.terraform.io/hashicorp/local" { version = "2.2.2" hashes = [ "h1:5UYW2wJ320IggrzLt8tLD6MowePqycWtH1b2RInHZkE=", - "h1:BVEZnjtpWxKPG9OOQh4dFa1z5pwMO/uuzYtu6AR2LyM=", - "h1:S6nf97sybBugc8FtrOSPXaynEKx0gO6Oktu6KJzvdDU=", "h1:SjDyZXIUHEQzZe10VjhlhZq2a9kgQB6tmqJcpq2BeWg=", "zh:027e4873c69da214e2fed131666d5de92089732a11d096b68257da54d30b6f9d", "zh:0ba2216e16cfb72538d76a4c4945b4567a76f7edbfef926b1c5a08d7bba2a043", @@ -116,8 +106,6 @@ provider "registry.terraform.io/hashicorp/null" { hashes = [ "h1:71sNUDvmiJcijsvfXpiLCz0lXIBSsEJjMxljt7hxMhw=", "h1:Pctug/s/2Hg5FJqjYcTM0kPyx3AoYK1MpRWO0T9V2ns=", - "h1:YvH6gTaQzGdNv+SKTZujU1O0bO+Pw6vJHOPhqgN8XNs=", - "h1:ZD4wyZ0KJzt5s2mD0xD7paJlVONNicLvZKdgtezz02I=", "zh:063466f41f1d9fd0dd93722840c1314f046d8760b1812fa67c34de0afcba5597", 
"zh:08c058e367de6debdad35fc24d97131c7cf75103baec8279aba3506a08b53faf", "zh:73ce6dff935150d6ddc6ac4a10071e02647d10175c173cfe5dca81f3d13d8afe", @@ -138,8 +126,6 @@ provider "registry.terraform.io/hashicorp/random" { hashes = [ "h1:5A5VsY5wNmOZlupUcLnIoziMPn8htSZBXbP3lI7lBEM=", "h1:9A6Ghjgad0KjJRxa6nPo8i8uFvwj3Vv0wnEgy49u+24=", - "h1:JF+aiOtS0G0ffbBdk1qfj7IrT39y/GZh/yl2IhqcIVM=", - "h1:hxN/z2AVJkF2ei7bfevJdD1B0WfyABxxk9j1zzLsLRk=", "zh:0daceba867b330d3f8e2c5dc895c4291845a78f31955ce1b91ab2c4d1cd1c10b", "zh:104050099efd30a630741f788f9576b19998e7a09347decbec3da0b21d64ba2d", "zh:173f4ef3fdf0c7e2564a3db0fac560e9f5afdf6afd0b75d6646af6576b122b16", @@ -155,12 +141,29 @@ provider "registry.terraform.io/hashicorp/random" { ] } +provider "registry.terraform.io/hashicorp/template" { + version = "2.2.0" + hashes = [ + "h1:0PGmlQJDT2HHYSryvhnhvd9P5UzMZ3KX3YyMNsOYXU0=", + "h1:0wlehNaxBX7GJQnPfQwTNvvAf38Jm0Nv7ssKGMaG6Og=", + "h1:94qn780bi1qjrbC3uQtjJh3Wkfwd5+tTtJHOb7KTg9w=", + "zh:01702196f0a0492ec07917db7aaa595843d8f171dc195f4c988d2ffca2a06386", + "zh:09aae3da826ba3d7df69efeb25d146a1de0d03e951d35019a0f80e4f58c89b53", + "zh:09ba83c0625b6fe0a954da6fbd0c355ac0b7f07f86c91a2a97849140fea49603", + "zh:0e3a6c8e16f17f19010accd0844187d524580d9fdb0731f675ffcf4afba03d16", + "zh:45f2c594b6f2f34ea663704cc72048b212fe7d16fb4cfd959365fa997228a776", + "zh:77ea3e5a0446784d77114b5e851c970a3dde1e08fa6de38210b8385d7605d451", + "zh:8a154388f3708e3df5a69122a23bdfaf760a523788a5081976b3d5616f7d30ae", + "zh:992843002f2db5a11e626b3fc23dc0c87ad3729b3b3cff08e32ffb3df97edbde", + "zh:ad906f4cebd3ec5e43d5cd6dc8f4c5c9cc3b33d2243c89c5fc18f97f7277b51d", + "zh:c979425ddb256511137ecd093e23283234da0154b7fa8b21c2687182d9aea8b2", + ] +} + provider "registry.terraform.io/hashicorp/tls" { version = "3.3.0" hashes = [ "h1:A4xOtHhD4jCmn4nO1xCTk2Nl5IP5JpjicjF+Fuu2ZFQ=", - "h1:Uf8HqbZjYn8pKB0og2H9A8IXIKtHT+o8BE3+fjtO1ZQ=", - "h1:oitTcxYGyDvHuNsjPJUi00a+AT0k+TWgNsGUSM2CV/E=", "h1:xx/b39Q9FVZSlDc97rlDmQ9dNaaxFFyVzP9kV+47z28=", "zh:16140e8cc880f95b642b6bf6564f4e98760e9991864aacc8e21273423571e561", "zh:16338b8457759c97fdd73153965d6063b037f2954fd512e569fcdc42b7fef743", @@ -181,9 +184,7 @@ provider "registry.terraform.io/hashicorp/vault" { version = "3.4.1" hashes = [ "h1:HIjd/7KktGO5E/a0uICbIanUj0Jdd0j8aL/r+QxFhAs=", - "h1:X8P4B/zB97Dtj21qp0Rrswlz92WYCA5C59jpYGZeQuc=", "h1:dXJBo807u69+Uib2hjoBQ68G2+nGXcNZeq/THVyQQVc=", - "h1:oow6cAwKiFpJBBWKsDqNmwZIrFTWWvoeIbqs+vyUDE0=", "zh:1eb8370a1846e34e2bcc4d11eece5733735784a8eab447bbed3cfd822101b577", "zh:2df3989327cea68b2167514b7ebddc67b09340f00bbf3fa85df03c97adfb9d25", "zh:3dd1e317264f574985e856296deef71a76464918bf0566eb0d7f6389ea0586bd", diff --git a/e2e/terraform/README.md b/e2e/terraform/README.md index c3e94fe7d406..bd298d873f00 100644 --- a/e2e/terraform/README.md +++ b/e2e/terraform/README.md @@ -17,12 +17,6 @@ HCP. This Terraform stack assumes that an appropriate instance role has been configured elsewhere and that you have the ability to `AssumeRole` into the AWS account. -If you're trying to provision the cluster from macOS on Apple Silicon hardware, -you will also need Nomad Linux binaries for x86_64 architecture. Since it's -currently impossible to cross-compile Nomad for Linux on macOS, you need to grab -a Nomad binary from [releases page](https://releases.hashicorp.com/nomad/) and -put it in `../pkg/linux_amd64` directory before running Terraform. - Configure the following environment variables. 
For HashiCorp Nomad developers, this configuration can be found in 1Pass in the Nomad team's vault under `nomad-e2e`. diff --git a/e2e/terraform/ecs.tf b/e2e/terraform/ecs.tf index 9c1c27e72caf..6995a79595e1 100644 --- a/e2e/terraform/ecs.tf +++ b/e2e/terraform/ecs.tf @@ -18,11 +18,15 @@ resource "aws_ecs_task_definition" "nomad_rtd_e2e" { memory = 512 } +data "template_file" "ecs_vars_hcl" { + template = <complete allocation")) @@ -114,10 +118,6 @@ func TestVaultSecrets(t *testing.T) { renderedCert := waitForAllocSecret(t, submission, "/secrets/certificate.crt", "BEGIN CERTIFICATE") waitForAllocSecret(t, submission, "/secrets/access.key", secretValue) - // record the earliest we can guaranteee that the vault lease TTL has - // started, so we don't have to wait excessively later on - ttlStart := time.Now() - var re = regexp.MustCompile(`VAULT_TOKEN=(.*)`) // check vault token was written and save it for later comparison diff --git a/go.mod b/go.mod index d7607e6928b2..9d97f13dc089 100644 --- a/go.mod +++ b/go.mod @@ -18,7 +18,7 @@ require ( github.com/Masterminds/sprig/v3 v3.2.3 github.com/Microsoft/go-winio v0.6.1 github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e - github.com/armon/go-metrics v0.5.3 + github.com/armon/go-metrics v0.4.1 github.com/aws/aws-sdk-go v1.44.184 github.com/brianvoe/gofakeit/v6 v6.20.1 github.com/container-storage-interface/spec v1.7.0 @@ -33,7 +33,7 @@ require ( github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.1 github.com/elazarl/go-bindata-assetfs v1.0.1 - github.com/fatih/color v1.16.0 + github.com/fatih/color v1.15.0 github.com/fsouza/go-dockerclient v1.10.1 github.com/go-jose/go-jose/v3 v3.0.3 github.com/golang-jwt/jwt/v5 v5.0.0 @@ -57,18 +57,18 @@ require ( github.com/hashicorp/go-discover v0.0.0-20220621183603-a413e131e836 github.com/hashicorp/go-envparse v0.0.0-20180119215841-310ca1881b22 github.com/hashicorp/go-getter v1.7.0 - github.com/hashicorp/go-hclog v1.6.2 + github.com/hashicorp/go-hclog v1.5.0 github.com/hashicorp/go-immutable-radix/v2 v2.1.0 github.com/hashicorp/go-kms-wrapping/v2 v2.0.15 github.com/hashicorp/go-memdb v1.3.4 - github.com/hashicorp/go-msgpack/v2 v2.1.2 + github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-netaddrs v0.1.0 github.com/hashicorp/go-plugin v1.6.0 github.com/hashicorp/go-secure-stdlib/listenerutil v0.1.4 github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/go-set/v2 v2.1.0 - github.com/hashicorp/go-sockaddr v1.0.5 + github.com/hashicorp/go-sockaddr v1.0.2 github.com/hashicorp/go-syslog v1.0.0 github.com/hashicorp/go-uuid v1.0.3 github.com/hashicorp/go-version v1.6.0 @@ -77,13 +77,13 @@ require ( github.com/hashicorp/hcl/v2 v2.9.2-0.20220525143345-ab3cae0737bc github.com/hashicorp/hil v0.0.0-20210521165536-27a72121fd40 github.com/hashicorp/logutils v1.0.0 - github.com/hashicorp/memberlist v0.5.1 - github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 + github.com/hashicorp/memberlist v0.5.0 + github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 github.com/hashicorp/nomad/api v0.0.0-20230103221135-ce00d683f9be - github.com/hashicorp/raft v1.6.1 + github.com/hashicorp/raft v1.5.0 github.com/hashicorp/raft-autopilot v0.1.6 - github.com/hashicorp/raft-boltdb/v2 v2.3.0 - github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa + github.com/hashicorp/raft-boltdb/v2 v2.2.2 + github.com/hashicorp/serf v0.10.1 github.com/hashicorp/vault/api v1.10.0 
github.com/hashicorp/yamux v0.1.1 github.com/hpcloud/tail v1.0.1-0.20170814160653-37f427138745 @@ -91,7 +91,7 @@ require ( github.com/kr/pretty v0.3.1 github.com/kr/text v0.2.0 github.com/mattn/go-colorable v0.1.13 - github.com/miekg/dns v1.1.56 + github.com/miekg/dns v1.1.50 github.com/mitchellh/cli v1.1.5 github.com/mitchellh/colorstring v0.0.0-20150917214807-8631ce90f286 github.com/mitchellh/copystructure v1.2.0 @@ -118,17 +118,17 @@ require ( github.com/shirou/gopsutil/v3 v3.23.9 github.com/shoenig/go-landlock v1.2.0 github.com/shoenig/go-m1cpu v0.1.6 - github.com/shoenig/test v1.7.1 + github.com/shoenig/test v1.7.0 github.com/stretchr/testify v1.8.4 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 github.com/zclconf/go-cty v1.12.1 github.com/zclconf/go-cty-yaml v1.0.3 - go.etcd.io/bbolt v1.3.9 + go.etcd.io/bbolt v1.3.7 go.uber.org/goleak v1.2.1 golang.org/x/crypto v0.19.0 golang.org/x/exp v0.0.0-20231006140011-7918f672742d golang.org/x/sync v0.6.0 - golang.org/x/sys v0.18.0 + golang.org/x/sys v0.17.0 golang.org/x/time v0.3.0 google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.33.0 @@ -198,7 +198,7 @@ require ( github.com/gojuno/minimock/v3 v3.0.6 // indirect github.com/golang-jwt/jwt/v4 v4.4.3 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/google/btree v1.1.2 // indirect + github.com/google/btree v1.0.1 // indirect github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect github.com/google/s2a-go v0.1.4 // indirect github.com/google/uuid v1.3.1 // indirect @@ -209,14 +209,13 @@ require ( github.com/gorilla/mux v1.8.1 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect - github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f // indirect github.com/hashicorp/go-retryablehttp v0.7.2 // indirect github.com/hashicorp/go-rootcerts v1.0.2 // indirect github.com/hashicorp/go-safetemp v1.0.0 // indirect github.com/hashicorp/go-secure-stdlib/parseutil v0.1.7 // indirect github.com/hashicorp/go-secure-stdlib/reloadutil v0.1.1 // indirect github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 // indirect - github.com/hashicorp/golang-lru v1.0.2 // indirect + github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/hashicorp/mdns v1.0.4 // indirect github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 // indirect github.com/hashicorp/vic v1.5.1-0.20190403131502-bbfe86ec9443 // indirect @@ -230,7 +229,7 @@ require ( github.com/klauspost/compress v1.16.0 // indirect github.com/linode/linodego v0.7.1 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-isatty v0.0.17 // indirect github.com/mattn/go-runewidth v0.0.12 // indirect github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 // indirect github.com/mitchellh/go-wordwrap v1.0.1 // indirect diff --git a/go.sum b/go.sum index 7f9f441e745a..0ce08e8afa8c 100644 --- a/go.sum +++ b/go.sum @@ -414,9 +414,10 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= @@ -512,8 +513,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= -github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= +github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= +github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -639,8 +640,8 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= -github.com/hashicorp/go-hclog v1.6.2 h1:NOtoftovWkDheyUM/8JW3QMiXyxJK3uHRK7wV04nD2I= -github.com/hashicorp/go-hclog v1.6.2/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= @@ -651,12 +652,12 @@ github.com/hashicorp/go-kms-wrapping/v2 v2.0.15 h1:f3+/VbanXOmVAaDBKwRiVmeL7EX34 github.com/hashicorp/go-kms-wrapping/v2 v2.0.15/go.mod h1:0dWtzl2ilqKpavgM3id/kFK9L3tjo6fS4OhbVPSYpnQ= github.com/hashicorp/go-memdb v1.3.4 h1:XSL3NR682X/cVk2IeV0d70N4DZ9ljI885xAEU8IoK3c= github.com/hashicorp/go-memdb v1.3.4/go.mod h1:uBTr1oQbtuMgd1SSGoR8YV27eT3sBHbYiNm53bMpgSg= +github.com/hashicorp/go-msgpack v0.5.3/go.mod 
h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f h1:/xqzTen8ftnKv3cKa87WEoOLtsDJYFU0ArjrKaPTTkc= github.com/hashicorp/go-msgpack v1.1.6-0.20240304204939-8824e8ccc35f/go.mod h1:gWVc3sv/wbDmR3rQsj1CAktEZzoz1YNK9NfGLXJ69/4= -github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0= -github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/hashicorp/go-netaddrs v0.1.0 h1:TnlYvODD4C/wO+j7cX1z69kV5gOzI87u3OcUinANaW8= @@ -687,12 +688,13 @@ github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2 h1:phcbL8urUzF/kxA/Oj6awENa github.com/hashicorp/go-secure-stdlib/tlsutil v0.1.2/go.mod h1:l8slYwnJA26yBz+ErHpp2IRCLr0vuOMGBORIz4rRiAs= github.com/hashicorp/go-set/v2 v2.1.0 h1:iERPCQWks+I+4bTgy0CT2myZsCqNgBg79ZHqwniohXo= github.com/hashicorp/go-set/v2 v2.1.0/go.mod h1:6q4nh8UCVZODn2tJ5RbJi8+ki7pjZBsAEYGt6yaGeTo= +github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= +github.com/hashicorp/go-sockaddr v1.0.2 h1:ztczhD1jLxIRjVejw8gFomI1BQZOe2WoVOu0SyteCQc= github.com/hashicorp/go-sockaddr v1.0.2/go.mod h1:rB4wwRAUzs07qva3c5SdrY/NEtAUjGlgmH/UkBUC97A= -github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU= -github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI= github.com/hashicorp/go-syslog v1.0.0 h1:KaodqZuhUoZereWVIYmpUgZysurB1kBLX2j0MwMrUAE= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8= github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= @@ -700,9 +702,8 @@ github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mO github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c= -github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru/v2 v2.0.1 h1:5pv5N1lT1fjLg2VQ5KWc7kmucp2x/kvFOnxuVTqZ6x4= github.com/hashicorp/golang-lru/v2 v2.0.1/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.1-0.20201016140508-a07e7d50bbee 
h1:8B4HqvMUtYSjsGkYjiQGStc9pXffY2J+Z2SPQAj+wMY= @@ -716,22 +717,23 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4 h1:sY0CMhFmjIPDMlTB+HfymFHCaYLhgifZ0QhjaYKD/UQ= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= -github.com/hashicorp/memberlist v0.5.1 h1:mk5dRuzeDNis2bi6LLoQIXfMH7JQvAzt3mQD0vNZZUo= -github.com/hashicorp/memberlist v0.5.1/go.mod h1:zGDXV6AqbDTKTM6yxW0I4+JtFzZAJVoIPvss4hV8F24= -github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0 h1:kBpVVl1sl3MaSrs97e0+pDQhSrqJv9gVbSUrPpVfl1w= -github.com/hashicorp/net-rpc-msgpackrpc/v2 v2.0.0/go.mod h1:6pdNz0vo0mF0GvhwDG56O3N18qBrAz/XRIcfINfTbwo= +github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM= +github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0= +github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69 h1:lc3c72qGlIMDqQpQH82Y4vaglRMMFdJbziYWriR4UcE= +github.com/hashicorp/net-rpc-msgpackrpc v0.0.0-20151116020338-a14192a58a69/go.mod h1:/z+jUGRBlwVpUZfjute9jWaF6/HuhjuFQuL1YXzVD1Q= +github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= -github.com/hashicorp/raft v1.6.1 h1:v/jm5fcYHvVkL0akByAp+IDdDSzCNCGhdO6VdB56HIM= -github.com/hashicorp/raft v1.6.1/go.mod h1:N1sKh6Vn47mrWvEArQgILTyng8GoDRNYlgKyK7PMjs0= +github.com/hashicorp/raft v1.5.0 h1:uNs9EfJ4FwiArZRxxfd/dQ5d33nV31/CdCHArH89hT8= +github.com/hashicorp/raft v1.5.0/go.mod h1:pKHB2mf/Y25u3AHNSXVRv+yT+WAnmeTX0BwVppVQV+M= github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= -github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702 h1:RLKEcCuKcZ+qp2VlaaZsYZfLOmIiuJNpEi48Rl8u9cQ= -github.com/hashicorp/raft-boltdb v0.0.0-20230125174641-2a8082862702/go.mod h1:nTakvJ4XYq45UXtn0DbwR4aU9ZdjlnIenpbs6Cd+FM0= -github.com/hashicorp/raft-boltdb/v2 v2.3.0 h1:fPpQR1iGEVYjZ2OELvUHX600VAK5qmdnDEv3eXOwZUA= -github.com/hashicorp/raft-boltdb/v2 v2.3.0/go.mod h1:YHukhB04ChJsLHLJEUD6vjFyLX2L3dsX3wPBZcX4tmc= -github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa h1:UXgK+AZPfeQ1vOXXXfBj7C7mZpWUgRFcMAKpyyYrYgU= -github.com/hashicorp/serf v0.10.2-0.20240320153621-5d32001edfaa/go.mod h1:RiISHML4PEb0ZN6S6uNW04TO8D6EUtTIOpCzzDnZeGk= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea h1:RxcPJuutPRM8PUOyiweMmkuNO+RJyfy2jds2gfvgNmU= +github.com/hashicorp/raft-boltdb v0.0.0-20210409134258-03c10cc3d4ea/go.mod h1:qRd6nFJYYS6Iqnc/8HcUmko2/2Gw8qTFEmxDLii6W5I= +github.com/hashicorp/raft-boltdb/v2 v2.2.2 h1:rlkPtOllgIcKLxVT4nutqlTH2NRFn+tO1wwZk/4Dxqw= +github.com/hashicorp/raft-boltdb/v2 v2.2.2/go.mod h1:N8YgaZgNJLpZC+h+by7vDu5rzsRgONThTEeUS3zWbfY= +github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY= +github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4= github.com/hashicorp/vault/api v1.10.0 h1:/US7sIjWN6Imp4o/Rj1Ce2Nr5bki/AXi9vAW3p2tOJQ= github.com/hashicorp/vault/api v1.10.0/go.mod h1:jo5Y/ET+hNyz+JnKDt8XLAdKs+AM0G5W0Vp1IrFI8N8= 
github.com/hashicorp/vault/api/auth/kubernetes v0.5.0 h1:CXO0fD7M3iCGovP/UApeHhPcH4paDFKcu7AjEXi94rI= @@ -819,11 +821,12 @@ github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= +github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= +github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow= @@ -832,10 +835,12 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5 github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0 h1:jWpvCLoY8Z/e3VKvlsiIGKtc+UG6U5vzxaoagmhXfyg= github.com/matttproud/golang_protobuf_extensions/v2 v2.0.0/go.mod h1:QUyp042oQthUoa9bqDv0ER0wrtXnBruoNd7aNjkbP+k= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= -github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= +github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= +github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= +github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI= github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/cli v1.1.5 h1:OxRIeJXpAMztws/XHlN2vu6imG5Dpq+j61AzAX5fLng= github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= @@ -859,6 +864,7 @@ github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQ github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= github.com/mitchellh/hashstructure v1.1.0 h1:P6P1hdjqAAknpY/M1CGipelZgp+4y9ja9kmUZPXP+H0= github.com/mitchellh/hashstructure v1.1.0/go.mod h1:xUDAozZz0Wmdiufv0uyhnHkUTN6/6d8ulp4AwfLKrmA= +github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 
h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -935,6 +941,7 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= @@ -986,6 +993,7 @@ github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjR github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= github.com/rs/cors v1.8.3 h1:O+qNyWn7Z+F9M0ILBHgMVPuB1xTOucVd5gtaYyXBpRo= github.com/rs/cors v1.8.3/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= +github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/ryanuber/columnize v2.1.2+incompatible h1:C89EOx/XBWwIXl8wm8OPJBd7kPF25UfsK2X7Ph/zCAk= github.com/ryanuber/columnize v2.1.2+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= @@ -1004,8 +1012,8 @@ github.com/shoenig/go-landlock v1.2.0/go.mod h1:S848L96G6iny3xexNb4sXUrKwEDIy5ul github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= -github.com/shoenig/test v1.7.1 h1:UJcjSAI3aUKx52kfcfhblgyhZceouhvvs3OYdWgn+PY= -github.com/shoenig/test v1.7.1/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= +github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk= +github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -1041,6 +1049,7 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 h1:kdXcSzyDtseVEc4yCz2qF8ZrQvIDBJLl4S1c3GCXmoI= @@ -1086,8 +1095,9 @@ github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeW github.com/zclconf/go-cty-debug 
v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= github.com/zclconf/go-cty-yaml v1.0.3 h1:og/eOQ7lvA/WWhHGFETVWNduJM7Rjsv2RRpx1sdFMLc= github.com/zclconf/go-cty-yaml v1.0.3/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= -go.etcd.io/bbolt v1.3.9 h1:8x7aARPEXiXbHmtUwAIv7eV2fQFHrLLavdiJ3uzJXoI= -go.etcd.io/bbolt v1.3.9/go.mod h1:zaO32+Ti0PK1ivdPtgMESzuzL2VPoIG1PCQNvOdo/dE= +go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= +go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= +go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -1111,6 +1121,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1186,6 +1197,7 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1214,6 +1226,7 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= @@ -1300,10 +1313,13 @@ golang.org/x/sys 
v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1373,13 +1389,11 @@ golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= @@ -1428,6 +1442,7 @@ golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1470,6 +1485,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= diff --git a/helper/boltdd/boltdd.go b/helper/boltdd/boltdd.go index b12e08802408..3b961911a0bf 100644 --- a/helper/boltdd/boltdd.go +++ b/helper/boltdd/boltdd.go @@ -11,7 +11,7 @@ import ( "os" "sync" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/nomad/structs" "go.etcd.io/bbolt" "golang.org/x/crypto/blake2b" diff --git a/helper/boltdd/boltdd_test.go b/helper/boltdd/boltdd_test.go index e313dda6d61a..545c37513438 100644 --- a/helper/boltdd/boltdd_test.go +++ b/helper/boltdd/boltdd_test.go @@ -9,7 +9,7 @@ import ( "path/filepath" "testing" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" diff --git a/helper/pluginutils/hclutils/testing.go b/helper/pluginutils/hclutils/testing.go index 9cf3f16d164d..5e5e6bcf070e 100644 --- a/helper/pluginutils/hclutils/testing.go +++ b/helper/pluginutils/hclutils/testing.go @@ -6,7 +6,7 @@ package hclutils import ( "testing" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/hcl" "github.com/hashicorp/hcl/hcl/ast" "github.com/hashicorp/nomad/helper/pluginutils/hclspecutils" diff --git a/helper/pluginutils/hclutils/types.go b/helper/pluginutils/hclutils/types.go index d117d991271e..478909b049e2 100644 --- a/helper/pluginutils/hclutils/types.go +++ b/helper/pluginutils/hclutils/types.go @@ -4,7 +4,7 @@ package hclutils import ( - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" ) // MapStrInt is a wrapper for map[string]int that handles diff --git a/helper/pluginutils/hclutils/util.go b/helper/pluginutils/hclutils/util.go index df56d846a2b9..a1c1eb94914f 100644 --- a/helper/pluginutils/hclutils/util.go +++ b/helper/pluginutils/hclutils/util.go @@ -8,7 +8,7 @@ import ( "errors" "fmt" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" hcl "github.com/hashicorp/hcl/v2" "github.com/hashicorp/hcl/v2/hcldec" hjson "github.com/hashicorp/hcl/v2/json" diff --git a/helper/pool/pool.go b/helper/pool/pool.go index 14a6aadfb1c8..12e435b0c251 100644 --- a/helper/pool/pool.go +++ b/helper/pool/pool.go @@ -15,7 +15,7 @@ import ( "time" hclog "github.com/hashicorp/go-hclog" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/tlsutil" "github.com/hashicorp/nomad/nomad/structs" diff --git a/helper/raftutil/msgpack.go b/helper/raftutil/msgpack.go index 3bb1e6f42971..694eedaca104 100644 --- a/helper/raftutil/msgpack.go +++ b/helper/raftutil/msgpack.go @@ -9,7 +9,7 @@ import ( 
"time" "unicode" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/helper/raftutil/msgpack_test.go b/helper/raftutil/msgpack_test.go index 1ef4de64db51..067f96a36d92 100644 --- a/helper/raftutil/msgpack_test.go +++ b/helper/raftutil/msgpack_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/stretchr/testify/require" diff --git a/helper/raftutil/state.go b/helper/raftutil/state.go index 2172f4d5e0ac..c9a097924d05 100644 --- a/helper/raftutil/state.go +++ b/helper/raftutil/state.go @@ -12,7 +12,7 @@ import ( "strings" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/raft" raftboltdb "github.com/hashicorp/raft-boltdb/v2" @@ -31,7 +31,6 @@ func RaftStateInfo(p string) (store *raftboltdb.BoltStore, firstIdx uint64, last ReadOnly: true, Timeout: 1 * time.Second, }, - MsgpackUseNewTimeFormat: true, } s, err := raftboltdb.New(opts) if err != nil { diff --git a/helper/snapshot/snapshot_test.go b/helper/snapshot/snapshot_test.go index e17b10bf6ef5..03090c84e5bb 100644 --- a/helper/snapshot/snapshot_test.go +++ b/helper/snapshot/snapshot_test.go @@ -16,7 +16,7 @@ import ( "time" "github.com/hashicorp/consul/sdk/testutil" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/raft" "github.com/stretchr/testify/require" diff --git a/helper/subproc/self.go b/helper/subproc/self.go index 7f0709753c38..1f44cbce96be 100644 --- a/helper/subproc/self.go +++ b/helper/subproc/self.go @@ -8,31 +8,30 @@ import ( "os" "os/exec" "strings" - "sync" ) var ( // executable is the executable of this process executable string - once sync.Once ) -// Self returns the path to the executable of this process. -func Self() string { - once.Do(func() { - s, err := os.Executable() - if err != nil { - panic(fmt.Sprintf("failed to detect executable: %v", err)) - } +func init() { + s, err := os.Executable() + if err != nil { + panic(fmt.Sprintf("failed to detect executable: %v", err)) + } - // when running tests, we need to use the real nomad binary, - // and make sure you recompile between changes! - if strings.HasSuffix(s, ".test") { - if s, err = exec.LookPath("nomad"); err != nil { - panic(fmt.Sprintf("failed to find nomad binary: %v", err)) - } + // when running tests, we need to use the real nomad binary, + // and make sure you recompile between changes! + if strings.HasSuffix(s, ".test") { + if s, err = exec.LookPath("nomad"); err != nil { + panic(fmt.Sprintf("failed to find nomad binary: %v", err)) } - executable = s - }) + } + executable = s +} + +// Self returns the path to the executable of this process. +func Self() string { return executable } diff --git a/helper/subproc/self_test.go b/helper/subproc/self_test.go deleted file mode 100644 index 98b45eb53eed..000000000000 --- a/helper/subproc/self_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package subproc - -import ( - "testing" - - "github.com/shoenig/test/must" -) - -func TestSelf(t *testing.T) { - value := Self() - must.NotEq(t, "", value) -} diff --git a/helper/subproc/subproc.go b/helper/subproc/subproc.go index 1857fa4f8711..0cc18422d1cf 100644 --- a/helper/subproc/subproc.go +++ b/helper/subproc/subproc.go @@ -15,16 +15,13 @@ import ( const ( // ExitSuccess indicates the subprocess completed successfully. - ExitSuccess = 0 + ExitSuccess = iota // ExitFailure indicates the subprocess terminated unsuccessfully. - ExitFailure = 1 + ExitFailure // ExitTimeout indicates the subprocess timed out before completion. - ExitTimeout = 2 - - // ExitNotRunnable indicates a command cannot be run. - ExitNotRunnable = 127 // bash-ism + ExitTimeout ) // MainFunc is the function that runs for this sub-process. diff --git a/helper/users/dynamic/pool.go b/helper/users/dynamic/pool.go deleted file mode 100644 index 8b020dab18a8..000000000000 --- a/helper/users/dynamic/pool.go +++ /dev/null @@ -1,176 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -// Package dynamic provides a way of allocating UID/GID to be used by Nomad -// tasks with no associated service users managed by the operating system. -package dynamic - -import ( - "errors" - "math/rand" - "strconv" - "sync" - - "github.com/hashicorp/go-set/v2" - "github.com/hashicorp/nomad/helper" -) - -var ( - ErrPoolExhausted = errors.New("users: uid/gid pool exhausted") - ErrReleaseUnused = errors.New("users: release of unused uid/gid") - ErrCannotParse = errors.New("users: unable to parse uid/gid from username") -) - -// none indicates no dynamic user -const none = 0 - -// doNotEnable indicates functionality should be disabled -const doNotEnable = -1 - -// A UGID is a combination User (UID) and Group (GID). Since Nomad is -// allocating these values together from the same pool it can ensure they are -// always matching values, thus encoding them with one value. -type UGID int - -// String returns the string representation of a UGID. -// -// It's just the numbers. -func (id UGID) String() string { - return strconv.Itoa(int(id)) -} - -// A Pool is used to manage a reserved set of UID/GID values. A UGID can be -// acquired from the pool and released back to the pool. To support client -// restarts, specific UGIDs can be marked as in-use and can later be released -// back into the pool. -type Pool interface { - // Restore a UGID currently in use by a Task during a Nomad client restore. - Restore(UGID) - - // Acquire returns a UGID that is not currently in use. - Acquire() (UGID, error) - - // Release returns a UGID no longer being used into the pool. - Release(UGID) error -} - -// PoolConfig contains options for creating a new Pool. -type PoolConfig struct { - // MinUGID is the minimum value for a UGID allocated from the pool. - MinUGID int - - // MaxUGID is the maximum value for a UGID allocated from the pool. - MaxUGID int -} - -// disable will return true if either min or max is set to Disable (-1), -// indicating the client should not enable the dynamic workload users -// functionality -func (p *PoolConfig) disable() bool { - return p.MinUGID == doNotEnable || p.MaxUGID == doNotEnable -} - -// New creates a Pool with the given PoolConfig options. 
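// A minimal usage sketch for this Pool; the MinUGID/MaxUGID values are
// illustrative, not Nomad defaults:
//
//	p := New(&PoolConfig{MinUGID: 80000, MaxUGID: 89999})
//	ugid, err := p.Acquire() // ErrPoolExhausted once all 10,000 ids are in use
//	if err == nil {
//		defer p.Release(ugid)
//	}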
-func New(opts *PoolConfig) Pool { - if opts == nil { - panic("bug: users pool cannot be nil") - } - if opts.disable() { - return new(noopPool) - } - if opts.MinUGID < 0 { - panic("bug: users pool min must be >= 0") - } - if opts.MaxUGID < opts.MinUGID { - panic("bug: users pool max must be >= min") - } - // a small but reasonable number of tasks to expect - const defaultPoolCapacity = 32 - return &pool{ - min: UGID(opts.MinUGID), - max: UGID(opts.MaxUGID), - lock: new(sync.Mutex), - used: set.New[UGID](defaultPoolCapacity), - } -} - -// noopPool is an implementation of Pool that does not allow acquiring ugids -type noopPool struct{} - -func (*noopPool) Restore(UGID) {} -func (*noopPool) Acquire() (UGID, error) { - return 0, errors.New("dynamic workload users disabled") -} -func (*noopPool) Release(UGID) error { - // avoid giving an error if a client is restarted with a new config - // that disables dynamic workload users but still has a task running - // making use of one - return nil -} - -type pool struct { - min UGID - max UGID - - lock *sync.Mutex - used *set.Set[UGID] -} - -func (p *pool) Restore(id UGID) { - helper.WithLock(p.lock, func() { - p.used.Insert(id) - }) -} - -func (p *pool) Acquire() (UGID, error) { - p.lock.Lock() - defer p.lock.Unlock() - - // optimize the case where the pool is exhausted - if p.used.Size() == int((p.max-p.min)+1) { - return none, ErrPoolExhausted - } - - // attempt to select a random ugid - if id := p.random(); id != none { - return id, nil - } - - // slow case where we iterate each id looking for one that is not used - for id := p.min; id <= p.max; id++ { - if !p.used.Contains(id) { - p.used.Insert(id) - return id, nil - } - } - - // we checked for this case up top; if we get here there is a bug - panic("bug: pool exhausted") -} - -// random will attempt to select a random UGID from the pool -func (p *pool) random() UGID { - // make up to 10 attempts to find a random unused UGID - // if all 10 attempts fail, return the sentinel indicating as much - const maxAttempts = 10 - size := int64(p.max - p.min) - tries := int(min(maxAttempts, size)) - for attempt := 0; attempt < tries; attempt++ { - id := UGID(rand.Int63n(size)) + p.min - if p.used.Insert(id) { - return id - } - } - return none -} - -func (p *pool) Release(id UGID) error { - p.lock.Lock() - defer p.lock.Unlock() - - if !p.used.Remove(id) { - return ErrReleaseUnused - } - - return nil -} diff --git a/helper/users/dynamic/pool_test.go b/helper/users/dynamic/pool_test.go deleted file mode 100644 index 8435e02ed49e..000000000000 --- a/helper/users/dynamic/pool_test.go +++ /dev/null @@ -1,119 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -//go:build linux - -package dynamic - -import ( - "fmt" - "slices" - "testing" - - "github.com/shoenig/test/must" -) - -var testPoolConfig = &PoolConfig{ - MinUGID: 200, - MaxUGID: 209, -} - -func TestPool_Release_unused(t *testing.T) { - p := New(testPoolConfig) - - cases := []struct { - id UGID - }{ - {id: 0}, - {id: 200}, - {id: 205}, - {id: 209}, - {id: 210}, - } - - for _, tc := range cases { - t.Run(fmt.Sprintf("id%s", tc.id), func(t *testing.T) { - err := p.Release(tc.id) - must.ErrorIs(t, ErrReleaseUnused, err) - }) - } -} - -func TestPool_Acquire_exhausted(t *testing.T) { - p := New(testPoolConfig) - - // consume all 10 ugids - for i := 200; i <= 209; i++ { - v, err := p.Acquire() - must.NoError(t, err) - must.Between[UGID](t, 200, v, 209) - } - - // next acquire should fail - v, err := p.Acquire() - must.Eq(t, none, v) - must.ErrorIs(t, ErrPoolExhausted, err) - - // let go of one ugid - err2 := p.Release(204) - must.NoError(t, err2) - - // now an acquire should succeed - v2, err3 := p.Acquire() - must.NoError(t, err3) - must.Eq(t, 204, v2) -} - -func TestPool_Acquire_random(t *testing.T) { - run1 := make([]UGID, 10) - run2 := make([]UGID, 10) - - p1 := New(testPoolConfig) - p2 := New(testPoolConfig) - - // acquire all 10 UGIDs and record the order of each - for i := 0; i < 10; i++ { - v1, err1 := p1.Acquire() - must.NoError(t, err1) - - v2, err2 := p2.Acquire() - must.NoError(t, err2) - - run1[i] = v1 - run2[i] = v2 - } - - // ensure both runs contain the expected ugids - exp := []UGID{200, 201, 202, 203, 204, 205, 206, 207, 208, 209} - must.SliceContainsAll(t, exp, run1) - must.SliceContainsAll(t, exp, run2) -} - -func TestPool_Restore(t *testing.T) { - p := New(&PoolConfig{ - MinUGID: 500, - MaxUGID: 505, - }) // 6 GUIDs - - // restore 501, 502, 504 - p.Restore(501) - p.Restore(502) - p.Restore(504) - - v1, err1 := p.Acquire() - must.NoError(t, err1) - - v2, err2 := p.Acquire() - must.NoError(t, err2) - - v3, err3 := p.Acquire() - must.NoError(t, err3) - - // ensure the next 3 are the UGIDs that were not already consumed - // and set via Restore - ids := []UGID{v1, v2, v3} - slices.Sort(ids) - must.Eq(t, 500, ids[0]) - must.Eq(t, 503, ids[1]) - must.Eq(t, 505, ids[2]) -} diff --git a/helper/users/dynamic/users.go b/helper/users/dynamic/users.go deleted file mode 100644 index 9d065b5fda88..000000000000 --- a/helper/users/dynamic/users.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package dynamic - -import ( - "fmt" - "regexp" - "strconv" - - "github.com/hashicorp/nomad/helper/users" -) - -const ( - // Home is the non-existent directory path to associate with dynamic - // workload users. Any operation on this path should cause an error. - // - // The path '/nonexistent' is consistent with what systemd uses for - // non-interactive service users. - Home = "/nonexistent" -) - -// String creates a pseudo username encoding the given ugid, in the form -// 'nomad-'. -func String(ugid UGID) string { - return fmt.Sprintf("nomad-%d", ugid) -} - -var ( - re = regexp.MustCompile(`^nomad-(\d+)$`) -) - -// Parse the given pseudo username and extract the ugid. 
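// A short round trip through this encoding, with 80123 as an illustrative id:
//
//	name := String(UGID(80123)) // "nomad-80123"
//	ugid, err := Parse(name)    // UGID(80123), nil
//	_, err = Parse("www-data")  // ErrCannotParse: no "nomad-<ugid>" shape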
-func Parse(user string) (UGID, error) { - values := re.FindStringSubmatch(user) - if len(values) != 2 { - return none, ErrCannotParse - } - - i, err := strconv.ParseUint(values[1], 10, 64) - if err != nil { - return none, ErrCannotParse - } - - return UGID(i), err -} - -// LookupUser will return the UID, GID, and home directory associated with the -// given username. If username is of the form 'nomad-' this indicates Nomad -// has synthesized a dynamic workload user for the task and the UID/GID are the -// value. -func LookupUser(username string) (int, int, string, error) { - // if we can successfully parse username as an anonymous user, use that - ugid, err := Parse(username) - if err == nil { - return int(ugid), int(ugid), Home, nil - } - - // otherwise lookup the user using nomad's user lookup cache - return users.LookupUnix(username) -} diff --git a/helper/users/lookup.go b/helper/users/lookup.go index bc0a7e70e167..a8e846c4d201 100644 --- a/helper/users/lookup.go +++ b/helper/users/lookup.go @@ -24,29 +24,6 @@ func Lookup(username string) (*user.User, error) { return globalCache.GetUser(username) } -// LookupUnix returns the UID, GID, and home directory for username or returns -// an error. ID values are int to work well with Go library functions. -// -// Will always fail on Windows and Plan 9. -func LookupUnix(username string) (int, int, string, error) { - u, err := Lookup(username) - if err != nil { - return 0, 0, "", fmt.Errorf("error looking up user %q: %w", username, err) - } - - uid, err := strconv.Atoi(u.Uid) - if err != nil { - return 0, 0, "", fmt.Errorf("error parsing uid: %w", err) - } - - gid, err := strconv.Atoi(u.Gid) - if err != nil { - return 0, 0, "", fmt.Errorf("error parsing gid: %w", err) - } - - return uid, gid, u.HomeDir, nil -} - // lock is used to serialize all user lookup at the process level, because // some NSS implementations are not concurrency safe var lock sync.Mutex @@ -66,6 +43,23 @@ func Current() (*user.User, error) { return user.Current() } +// UIDforUser returns the UID for the specified username or returns an error. +// +// Will always fail on Windows and Plan 9. +func UIDforUser(username string) (int, error) { + u, err := Lookup(username) + if err != nil { + return 0, err + } + + uid, err := strconv.Atoi(u.Uid) + if err != nil { + return 0, fmt.Errorf("error parsing uid: %w", err) + } + + return uid, nil +} + // WriteFileFor is like os.WriteFile except if possible it chowns the file to // the specified user (possibly from Task.User) and sets the permissions to // 0o600. 
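// A minimal sketch of the new helper; "nobody" is an illustrative username,
// and the chown stands in for the file-ownership callers below:
//
//	uid, err := UIDforUser("nobody")
//	if err != nil {
//		return err
//	}
//	return os.Chown(path, uid, -1) // gid -1 leaves the group unchanged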
@@ -104,7 +98,7 @@ func WriteFileFor(path string, contents []byte, username string) error { } func writeFileFor(path string, contents []byte, username string) error { - uid, _, _, err := LookupUnix(username) + uid, err := UIDforUser(username) if err != nil { return err } @@ -160,7 +154,7 @@ func SocketFileFor(logger hclog.Logger, path, username string) (net.Listener, er } func setSocketOwner(path, username string) error { - uid, _, _, err := LookupUnix(username) + uid, err := UIDforUser(username) if err != nil { return err } diff --git a/helper/users/lookup_linux_test.go b/helper/users/lookup_linux_test.go index ecb42b7fa1b7..45d12081a181 100644 --- a/helper/users/lookup_linux_test.go +++ b/helper/users/lookup_linux_test.go @@ -114,19 +114,3 @@ func TestSocketFileFor_Linux(t *testing.T) { must.Eq(t, 0o666, int(stat.Mode().Perm())) } } - -func TestLookupUnix_root(t *testing.T) { - uid, gid, home, err := LookupUnix("root") - must.NoError(t, err) - must.Zero(t, uid) // linux - must.Zero(t, gid) // linux - must.Eq(t, "/root", home) // ubuntu specific -} - -func TestLookupUnix_nobody(t *testing.T) { - uid, gid, home, err := LookupUnix("nobody") - must.NoError(t, err) - must.Eq(t, 65534, uid) // systemd specific - must.Eq(t, 65534, gid) // systemd specific - must.Eq(t, "/nonexistent", home) // ubuntu specific -} diff --git a/jobspec/parse.go b/jobspec/parse.go index 245e6a9dd970..6d622d542671 100644 --- a/jobspec/parse.go +++ b/jobspec/parse.go @@ -451,42 +451,6 @@ func parseUpdate(result **api.UpdateStrategy, list *ast.ObjectList) error { return dec.Decode(m) } -func parseDisconnect(result **api.DisconnectStrategy, list *ast.ObjectList) error { - list = list.Elem() - if len(list.Items) > 1 { - return fmt.Errorf("only one 'disconnect' block allowed") - } - - // Get our resource object - o := list.Items[0] - - var m map[string]interface{} - if err := hcl.DecodeObject(&m, o.Val); err != nil { - return err - } - - // Check for invalid keys - valid := []string{ - "lost_after", - "replace", - "reconcile", - "stop_on_client_after", - } - if err := checkHCLKeys(o.Val, valid); err != nil { - return err - } - - dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ - DecodeHook: mapstructure.StringToTimeDurationHookFunc(), - WeaklyTypedInput: true, - Result: result, - }) - if err != nil { - return err - } - return dec.Decode(m) -} - func parseMigrate(result **api.MigrateStrategy, list *ast.ObjectList) error { list = list.Elem() if len(list.Items) > 1 { diff --git a/jobspec/parse_group.go b/jobspec/parse_group.go index e940955b787f..94dc3008d9c3 100644 --- a/jobspec/parse_group.go +++ b/jobspec/parse_group.go @@ -50,7 +50,6 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error { "task", "ephemeral_disk", "update", - "disconnect", "reschedule", "vault", "migrate", @@ -80,7 +79,6 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error { delete(m, "restart") delete(m, "ephemeral_disk") delete(m, "update") - delete(m, "disconnect") delete(m, "vault") delete(m, "migrate") delete(m, "spread") @@ -170,13 +168,6 @@ func parseGroups(result *api.Job, list *ast.ObjectList) error { } } - // If we have an disconnect strategy, then parse that - if o := listVal.Filter("disconnect"); len(o.Items) > 0 { - if err := parseDisconnect(&g.Disconnect, o); err != nil { - return multierror.Prefix(err, "update ->") - } - } - // If we have a migration strategy, then parse that if o := listVal.Filter("migrate"); len(o.Items) > 0 { if err := parseMigrate(&g.Migrate, o); err != nil { diff --git 
a/jobspec/parse_test.go b/jobspec/parse_test.go index d9f6774a1906..20cfc5b182c8 100644 --- a/jobspec/parse_test.go +++ b/jobspec/parse_test.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/nomad/api" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) // consts copied from nomad/structs package to keep jobspec isolated from rest of nomad @@ -140,10 +140,8 @@ func TestParse(t *testing.T) { }, { - Name: stringToPtr("binsl"), - Count: intToPtr(5), - StopAfterClientDisconnect: timeToPtr(120 * time.Second), - MaxClientDisconnect: timeToPtr(120 * time.Hour), + Name: stringToPtr("binsl"), + Count: intToPtr(5), Constraints: []*api.Constraint{ { LTarget: "kernel.os", @@ -223,12 +221,8 @@ func TestParse(t *testing.T) { }, }, }, - Disconnect: &api.DisconnectStrategy{ - StopOnClientAfter: timeToPtr(120 * time.Second), - LostAfter: timeToPtr(120 * time.Hour), - Replace: boolToPtr(true), - Reconcile: stringToPtr("best_score"), - }, + StopAfterClientDisconnect: timeToPtr(120 * time.Second), + MaxClientDisconnect: timeToPtr(120 * time.Hour), ReschedulePolicy: &api.ReschedulePolicy{ Interval: timeToPtr(12 * time.Hour), Attempts: intToPtr(5), @@ -1940,14 +1934,14 @@ func TestParse(t *testing.T) { t.Logf("Testing parse: %s", tc.File) path, err := filepath.Abs(filepath.Join("./test-fixtures", tc.File)) - must.NoError(t, err) + require.NoError(t, err) actual, err := ParseFile(path) if tc.Err { - must.Error(t, err) + require.Error(t, err) } else { - must.NoError(t, err) - must.Eq(t, tc.Result, actual) + require.NoError(t, err) + require.Equal(t, tc.Result, actual) } }) } @@ -2018,15 +2012,15 @@ func TestPortParsing(t *testing.T) { var job *api.Job path, err = filepath.Abs(filepath.Join("./test-fixtures", "parse-ports.hcl")) - must.NoError(t, err, must.Sprint("Can't get absolute path for file: parse-ports.hcl")) + require.NoError(t, err, "Can't get absolute path for file: parse-ports.hcl") job, err = ParseFile(path) - must.NoError(t, err) - must.NotNil(t, job) - must.Len(t, 1, job.TaskGroups) - must.Len(t, 1, job.TaskGroups[0].Networks) - must.Len(t, 1, job.TaskGroups[0].Networks[0].ReservedPorts) - must.Len(t, 1, job.TaskGroups[0].Networks[0].DynamicPorts) - must.Eq(t, 9000, job.TaskGroups[0].Networks[0].ReservedPorts[0].Value) - must.Eq(t, 0, job.TaskGroups[0].Networks[0].DynamicPorts[0].Value) + require.NoError(t, err, "cannot parse job") + require.NotNil(t, job) + require.Len(t, job.TaskGroups, 1) + require.Len(t, job.TaskGroups[0].Networks, 1) + require.Len(t, job.TaskGroups[0].Networks[0].ReservedPorts, 1) + require.Len(t, job.TaskGroups[0].Networks[0].DynamicPorts, 1) + require.Equal(t, 9000, job.TaskGroups[0].Networks[0].ReservedPorts[0].Value) + require.Equal(t, 0, job.TaskGroups[0].Networks[0].DynamicPorts[0].Value) } diff --git a/jobspec/test-fixtures/basic.hcl b/jobspec/test-fixtures/basic.hcl index ed60d8799277..1176a9d96291 100644 --- a/jobspec/test-fixtures/basic.hcl +++ b/jobspec/test-fixtures/basic.hcl @@ -165,13 +165,6 @@ job "binstore-storagelocker" { stop_after_client_disconnect = "120s" max_client_disconnect = "120h" - disconnect { - lost_after = "120h" - stop_on_client_after = "120s" - replace = true - reconcile = "best_score" - } - task "binstore" { driver = "docker" user = "bob" diff --git a/nomad/acl_endpoint_test.go b/nomad/acl_endpoint_test.go index d087f8ab09e0..3d5a260b5808 100644 --- a/nomad/acl_endpoint_test.go +++ b/nomad/acl_endpoint_test.go @@ -14,7 +14,7 @@ import ( 
"github.com/golang-jwt/jwt/v5" capOIDC "github.com/hashicorp/cap/oidc" "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/acl_test.go b/nomad/acl_test.go index 85870e2863a8..e76da9d7e4ec 100644 --- a/nomad/acl_test.go +++ b/nomad/acl_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/alloc_endpoint_test.go b/nomad/alloc_endpoint_test.go index 6e4a2daf960e..de988d0b0ee2 100644 --- a/nomad/alloc_endpoint_test.go +++ b/nomad/alloc_endpoint_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" diff --git a/nomad/client_agent_endpoint.go b/nomad/client_agent_endpoint.go index 1d63acfa2223..c8f23375f6f3 100644 --- a/nomad/client_agent_endpoint.go +++ b/nomad/client_agent_endpoint.go @@ -22,7 +22,7 @@ import ( "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/structs" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" ) type Agent struct { diff --git a/nomad/client_agent_endpoint_test.go b/nomad/client_agent_endpoint_test.go index 3dcaa9ef7339..d6b4595fd16c 100644 --- a/nomad/client_agent_endpoint_test.go +++ b/nomad/client_agent_endpoint_test.go @@ -14,7 +14,7 @@ import ( "time" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" diff --git a/nomad/client_alloc_endpoint.go b/nomad/client_alloc_endpoint.go index 5b782f57e80f..b548b46cc1a3 100644 --- a/nomad/client_alloc_endpoint.go +++ b/nomad/client_alloc_endpoint.go @@ -13,7 +13,7 @@ import ( "github.com/armon/go-metrics" "github.com/hashicorp/go-hclog" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" cstructs "github.com/hashicorp/nomad/client/structs" diff --git a/nomad/client_alloc_endpoint_test.go b/nomad/client_alloc_endpoint_test.go index 9ccc195dc4e9..3d2df9f39a25 100644 --- a/nomad/client_alloc_endpoint_test.go +++ b/nomad/client_alloc_endpoint_test.go @@ -12,8 +12,8 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + "github.com/hashicorp/go-msgpack/codec" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" diff --git a/nomad/client_csi_endpoint_test.go b/nomad/client_csi_endpoint_test.go index 3ccbfebf2693..037e75358bba 100644 --- a/nomad/client_csi_endpoint_test.go +++ b/nomad/client_csi_endpoint_test.go @@ -10,7 +10,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" 
"github.com/hashicorp/nomad/client/config" diff --git a/nomad/client_fs_endpoint.go b/nomad/client_fs_endpoint.go index 165d88b70a61..9869e43cc70b 100644 --- a/nomad/client_fs_endpoint.go +++ b/nomad/client_fs_endpoint.go @@ -16,7 +16,7 @@ import ( cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/helper/pointer" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/client_fs_endpoint_test.go b/nomad/client_fs_endpoint_test.go index 470cad3dd218..6fa68c668acc 100644 --- a/nomad/client_fs_endpoint_test.go +++ b/nomad/client_fs_endpoint_test.go @@ -11,8 +11,8 @@ import ( "testing" "time" - codec "github.com/hashicorp/go-msgpack/v2/codec" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + codec "github.com/hashicorp/go-msgpack/codec" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" diff --git a/nomad/client_rpc.go b/nomad/client_rpc.go index 99140f5769fa..8fecef93b526 100644 --- a/nomad/client_rpc.go +++ b/nomad/client_rpc.go @@ -9,9 +9,9 @@ import ( "net" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" multierror "github.com/hashicorp/go-multierror" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/helper/pool" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/yamux" diff --git a/nomad/client_stats_endpoint_test.go b/nomad/client_stats_endpoint_test.go index 55c439da54ad..2703b008784c 100644 --- a/nomad/client_stats_endpoint_test.go +++ b/nomad/client_stats_endpoint_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client" diff --git a/nomad/config.go b/nomad/config.go index cee2c3124b6a..ff0d4d7b5c31 100644 --- a/nomad/config.go +++ b/nomad/config.go @@ -638,9 +638,6 @@ func DefaultConfig() *Config { c.SerfConfig.MemberlistConfig = memberlist.DefaultWANConfig() c.SerfConfig.MemberlistConfig.BindPort = DefaultSerfPort - c.SerfConfig.MsgpackUseNewTimeFormat = true - c.SerfConfig.MemberlistConfig.MsgpackUseNewTimeFormat = true - // Disable shutdown on removal c.RaftConfig.ShutdownOnRemove = false diff --git a/nomad/core_sched_test.go b/nomad/core_sched_test.go index 4d8e42c48991..7a7e95927b62 100644 --- a/nomad/core_sched_test.go +++ b/nomad/core_sched_test.go @@ -9,7 +9,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/csi_endpoint_test.go b/nomad/csi_endpoint_test.go index c1d1e9959430..cfb02b02bde2 100644 --- a/nomad/csi_endpoint_test.go +++ b/nomad/csi_endpoint_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/shoenig/test" "github.com/shoenig/test/must" "github.com/stretchr/testify/assert" diff --git a/nomad/deployment_endpoint_test.go 
index e63180ead905..0b4f6751da30 100644
--- a/nomad/deployment_endpoint_test.go
+++ b/nomad/deployment_endpoint_test.go
@@ -8,7 +8,7 @@ import (
 	"time"
 
 	memdb "github.com/hashicorp/go-memdb"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/acl"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/pointer"
diff --git a/nomad/drainer_int_test.go b/nomad/drainer_int_test.go
index 7c8b464ceafd..33c90ddeabb7 100644
--- a/nomad/drainer_int_test.go
+++ b/nomad/drainer_int_test.go
@@ -11,7 +11,7 @@ import (
 
 	log "github.com/hashicorp/go-hclog"
 	memdb "github.com/hashicorp/go-memdb"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/shoenig/test/must"
 	"github.com/shoenig/test/wait"
 
diff --git a/nomad/encrypter_test.go b/nomad/encrypter_test.go
index a14513ea029d..3992f8bb681f 100644
--- a/nomad/encrypter_test.go
+++ b/nomad/encrypter_test.go
@@ -15,7 +15,7 @@ import (
 	"time"
 
 	"github.com/go-jose/go-jose/v3/jwt"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/shoenig/test"
 	"github.com/shoenig/test/must"
 	"github.com/stretchr/testify/require"
diff --git a/nomad/eval_broker_test.go b/nomad/eval_broker_test.go
index d74eff85e8fa..5245aa392208 100644
--- a/nomad/eval_broker_test.go
+++ b/nomad/eval_broker_test.go
@@ -12,7 +12,7 @@ import (
 	"testing"
 	"time"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/shoenig/test"
 	"github.com/shoenig/test/must"
 	"github.com/shoenig/test/wait"
diff --git a/nomad/eval_endpoint_test.go b/nomad/eval_endpoint_test.go
index 1ea56ec00986..26be17cf4c24 100644
--- a/nomad/eval_endpoint_test.go
+++ b/nomad/eval_endpoint_test.go
@@ -12,7 +12,7 @@ import (
 
 	memdb "github.com/hashicorp/go-memdb"
 	"github.com/hashicorp/go-set/v2"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/acl"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/uuid"
diff --git a/nomad/event_endpoint.go b/nomad/event_endpoint.go
index e05df130fac1..3c679ce77fd2 100644
--- a/nomad/event_endpoint.go
+++ b/nomad/event_endpoint.go
@@ -8,7 +8,7 @@ import (
 	"io"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 
 	"github.com/hashicorp/nomad/helper/pointer"
 	"github.com/hashicorp/nomad/nomad/stream"
diff --git a/nomad/event_endpoint_test.go b/nomad/event_endpoint_test.go
index 4e11bb997f4e..de1f9d0dbafb 100644
--- a/nomad/event_endpoint_test.go
+++ b/nomad/event_endpoint_test.go
@@ -14,8 +14,8 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	"github.com/hashicorp/go-msgpack/codec"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/acl"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/pointer"
diff --git a/nomad/fsm.go b/nomad/fsm.go
index ff462036ec4f..db639d42e5a4 100644
--- a/nomad/fsm.go
+++ b/nomad/fsm.go
@@ -14,7 +14,7 @@ import (
 	"github.com/hashicorp/go-bexpr"
 	"github.com/hashicorp/go-hclog"
 	"github.com/hashicorp/go-memdb"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/helper/pointer"
"github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/state" diff --git a/nomad/fsm_registry_ce.go b/nomad/fsm_registry_ce.go index 97bfbd8a7afd..dc433514c00f 100644 --- a/nomad/fsm_registry_ce.go +++ b/nomad/fsm_registry_ce.go @@ -7,7 +7,7 @@ package nomad import ( - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/raft" ) diff --git a/nomad/heartbeat_test.go b/nomad/heartbeat_test.go index 52d234e6df14..80e1014c670a 100644 --- a/nomad/heartbeat_test.go +++ b/nomad/heartbeat_test.go @@ -9,13 +9,12 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/pointer" "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/testutil" - "github.com/shoenig/test/must" "github.com/stretchr/testify/require" ) @@ -294,71 +293,6 @@ func TestHeartbeat_Server_HeartbeatTTL_Failover(t *testing.T) { func TestHeartbeat_InvalidateHeartbeat_DisconnectedClient(t *testing.T) { ci.Parallel(t) - testCases := []struct { - name string - now time.Time - lostAfterOnDisconnect time.Duration - expectedNodeStatus string - }{ - { - name: "has-pending-reconnects", - now: time.Now().UTC(), - lostAfterOnDisconnect: 5 * time.Second, - expectedNodeStatus: structs.NodeStatusDisconnected, - }, - { - name: "has-expired-reconnects", - lostAfterOnDisconnect: 5 * time.Second, - now: time.Now().UTC().Add(-10 * time.Second), - expectedNodeStatus: structs.NodeStatusDown, - }, - { - name: "has-expired-reconnects-equal-timestamp", - lostAfterOnDisconnect: 5 * time.Second, - now: time.Now().UTC().Add(-5 * time.Second), - expectedNodeStatus: structs.NodeStatusDown, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - s1, cleanupS1 := TestServer(t, nil) - defer cleanupS1() - testutil.WaitForLeader(t, s1.RPC) - - // Create a node - node := mock.Node() - state := s1.fsm.State() - must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) - - alloc := mock.Alloc() - alloc.NodeID = node.ID - alloc.Job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: tc.lostAfterOnDisconnect, - } - alloc.ClientStatus = structs.AllocClientStatusUnknown - alloc.AllocStates = []*structs.AllocState{{ - Field: structs.AllocStateFieldClientStatus, - Value: structs.AllocClientStatusUnknown, - Time: tc.now, - }} - - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) - - // Trigger status update - s1.invalidateHeartbeat(node.ID) - out, err := state.NodeByID(nil, node.ID) - must.NoError(t, err) - must.Eq(t, tc.expectedNodeStatus, out.Status) - }) - } -} - -// Test using max_client_disconnect, remove after its deprecated in favor -// of Disconnect.LostAfter introduced in 1.8.0. 
-func TestHeartbeat_InvalidateHeartbeatDisconnectedClient(t *testing.T) { - ci.Parallel(t) - type testCase struct { name string now time.Time @@ -402,7 +336,7 @@ func TestHeartbeat_InvalidateHeartbeatDisconnectedClient(t *testing.T) { // Create a node node := mock.Node() state := s1.fsm.State() - must.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) + require.NoError(t, state.UpsertNode(structs.MsgTypeTestSetup, 1, node)) alloc := mock.Alloc() alloc.NodeID = node.ID @@ -413,13 +347,13 @@ func TestHeartbeat_InvalidateHeartbeatDisconnectedClient(t *testing.T) { Value: structs.AllocClientStatusUnknown, Time: tc.now, }} - must.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) + require.NoError(t, state.UpsertAllocs(structs.MsgTypeTestSetup, 2, []*structs.Allocation{alloc})) // Trigger status update s1.invalidateHeartbeat(node.ID) out, err := state.NodeByID(nil, node.ID) - must.NoError(t, err) - must.Eq(t, tc.expectedNodeStatus, out.Status) + require.NoError(t, err) + require.Equal(t, tc.expectedNodeStatus, out.Status) }) } } diff --git a/nomad/job_endpoint_ce_test.go b/nomad/job_endpoint_ce_test.go index 3e90402c30e8..1f13c2132ab8 100644 --- a/nomad/job_endpoint_ce_test.go +++ b/nomad/job_endpoint_ce_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" "github.com/hashicorp/nomad/helper/pointer" diff --git a/nomad/job_endpoint_test.go b/nomad/job_endpoint_test.go index c2e48cbfb44e..8d910ccd9a7a 100644 --- a/nomad/job_endpoint_test.go +++ b/nomad/job_endpoint_test.go @@ -12,7 +12,7 @@ import ( "time" "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/client/lib/idset" diff --git a/nomad/keyring_endpoint_test.go b/nomad/keyring_endpoint_test.go index e0573fad840b..0262b4b1af14 100644 --- a/nomad/keyring_endpoint_test.go +++ b/nomad/keyring_endpoint_test.go @@ -7,7 +7,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/shoenig/test/must" "github.com/stretchr/testify/require" diff --git a/nomad/namespace_endpoint_test.go b/nomad/namespace_endpoint_test.go index c24b3d525f57..384d4c2f9f04 100644 --- a/nomad/namespace_endpoint_test.go +++ b/nomad/namespace_endpoint_test.go @@ -8,7 +8,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/node_endpoint_test.go b/nomad/node_endpoint_test.go index b392dd653f76..799df7b6005e 100644 --- a/nomad/node_endpoint_test.go +++ b/nomad/node_endpoint_test.go @@ -15,7 +15,7 @@ import ( "time" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/command/agent/consul" @@ -919,220 +919,188 @@ func TestClientEndpoint_UpdateStatus_Vault_WorkloadIdentity(t *testing.T) { func TestClientEndpoint_UpdateStatus_Reconnect(t 
*testing.T) { ci.Parallel(t) - jobVersions := []struct { - name string - jobSpec func(time.Duration) *structs.Job - }{ - // Test using max_client_disconnect, remove after its deprecated in favor - // of Disconnect.LostAfter introduced in 1.8.0. - { - name: "job-with-max-client-disconnect-deprecated", - jobSpec: func(maxClientDisconnect time.Duration) *structs.Job { - job := mock.Job() - job.TaskGroups[0].MaxClientDisconnect = &maxClientDisconnect + // Setup server with tighter heartbeat so we don't have to wait so long + // for nodes to go down. + heartbeatTTL := time.Duration(500*testutil.TestMultiplier()) * time.Millisecond + s, cleanupS := TestServer(t, func(c *Config) { + c.MinHeartbeatTTL = heartbeatTTL + c.HeartbeatGrace = 2 * heartbeatTTL + }) + codec := rpcClient(t, s) + defer cleanupS() + testutil.WaitForLeader(t, s.RPC) - return job - }, - }, - { - name: "job-with-disconnect-block", - jobSpec: func(lostAfter time.Duration) *structs.Job { - job := mock.Job() - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: lostAfter, - } - return job - }, - }, + // Register node. + node := mock.Node() + reg := &structs.NodeRegisterRequest{ + Node: node, + WriteRequest: structs.WriteRequest{Region: "global"}, } + var nodeUpdateResp structs.NodeUpdateResponse + err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &nodeUpdateResp) + must.NoError(t, err) - for _, version := range jobVersions { - t.Run(version.name, func(t *testing.T) { + // Start heartbeat. + heartbeat := func(ctx context.Context) { + ticker := time.NewTicker(heartbeatTTL / 2) + defer ticker.Stop() - // Setup server with tighter heartbeat so we don't have to wait so long - // for nodes to go down. - heartbeatTTL := time.Duration(500*testutil.TestMultiplier()) * time.Millisecond - s, cleanupS := TestServer(t, func(c *Config) { - c.MinHeartbeatTTL = heartbeatTTL - c.HeartbeatGrace = 2 * heartbeatTTL - }) - codec := rpcClient(t, s) - defer cleanupS() - testutil.WaitForLeader(t, s.RPC) + for { + select { + case <-ctx.Done(): + return + case <-ticker.C: + if t.Failed() { + return + } - // Register node. - node := mock.Node() - reg := &structs.NodeRegisterRequest{ - Node: node, - WriteRequest: structs.WriteRequest{Region: "global"}, - } - var nodeUpdateResp structs.NodeUpdateResponse - err := msgpackrpc.CallWithCodec(codec, "Node.Register", reg, &nodeUpdateResp) - must.NoError(t, err) - - // Start heartbeat. - heartbeat := func(ctx context.Context) { - ticker := time.NewTicker(heartbeatTTL / 2) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return - case <-ticker.C: - if t.Failed() { - return - } - - req := &structs.NodeUpdateStatusRequest{ - NodeID: node.ID, - Status: structs.NodeStatusReady, - WriteRequest: structs.WriteRequest{Region: "global"}, - } - var resp structs.NodeUpdateResponse - // Ignore errors since an unexpected failed heartbeat will cause - // the test conditions to fail. - msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp) - } + req := &structs.NodeUpdateStatusRequest{ + NodeID: node.ID, + Status: structs.NodeStatusReady, + WriteRequest: structs.WriteRequest{Region: "global"}, } + var resp structs.NodeUpdateResponse + // Ignore errors since an unexpected failed heartbeat will cause + // the test conditions to fail. + msgpackrpc.CallWithCodec(codec, "Node.UpdateStatus", req, &resp) } - heartbeatCtx, cancelHeartbeat := context.WithCancel(context.Background()) - defer cancelHeartbeat() - go heartbeat(heartbeatCtx) - - // Wait for node to be ready. 
- testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusReady) - - // Register job with Disconnect.LostAfter - job := version.jobSpec(time.Hour) - job.Constraints = []*structs.Constraint{} - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].Constraints = []*structs.Constraint{} - job.TaskGroups[0].Tasks[0].Driver = "mock_driver" - job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{ - "run_for": "10m", - } + } + } + heartbeatCtx, cancelHeartbeat := context.WithCancel(context.Background()) + defer cancelHeartbeat() + go heartbeat(heartbeatCtx) - jobReq := &structs.JobRegisterRequest{ - Job: job, - WriteRequest: structs.WriteRequest{ - Region: "global", - Namespace: job.Namespace, - }, - } - var jobResp structs.JobRegisterResponse - err = msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp) - must.NoError(t, err) + // Wait for node to be ready. + testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusReady) - // Wait for alloc to be pending in the server. - testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ - structs.AllocClientStatusPending: 1, - }) + // Register job with max_client_disconnect. + job := mock.Job() + job.Constraints = []*structs.Constraint{} + job.TaskGroups[0].Count = 1 + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(time.Hour) + job.TaskGroups[0].Constraints = []*structs.Constraint{} + job.TaskGroups[0].Tasks[0].Driver = "mock_driver" + job.TaskGroups[0].Tasks[0].Config = map[string]interface{}{ + "run_for": "10m", + } - // Get allocs that node should run. - allocsReq := &structs.NodeSpecificRequest{ - NodeID: node.ID, - QueryOptions: structs.QueryOptions{ - Region: "global", - }, - } - var allocsResp structs.NodeAllocsResponse - err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp) - must.NoError(t, err) - must.Len(t, 1, allocsResp.Allocs) - - // Tell server the alloc is running. - // Save the alloc so we can reuse the request later. - alloc := allocsResp.Allocs[0].Copy() - alloc.ClientStatus = structs.AllocClientStatusRunning - - allocUpdateReq := &structs.AllocUpdateRequest{ - Alloc: []*structs.Allocation{alloc}, - WriteRequest: structs.WriteRequest{ - Region: "global", - }, - } - var resp structs.GenericResponse - err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp) - must.NoError(t, err) + jobReq := &structs.JobRegisterRequest{ + Job: job, + WriteRequest: structs.WriteRequest{ + Region: "global", + Namespace: job.Namespace, + }, + } + var jobResp structs.JobRegisterResponse + err = msgpackrpc.CallWithCodec(codec, "Job.Register", jobReq, &jobResp) + must.NoError(t, err) - // Wait for alloc to be running in the server. - testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ - structs.AllocClientStatusRunning: 1, - }) + // Wait for alloc to be pending in the server. + testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ + structs.AllocClientStatusPending: 1, + }) - // Stop heartbeat and wait for the client to be disconnected and the alloc - // to be unknown. - cancelHeartbeat() - testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusDisconnected) - testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ - structs.AllocClientStatusUnknown: 1, - }) + // Get allocs that node should run. 
+ allocsReq := &structs.NodeSpecificRequest{ + NodeID: node.ID, + QueryOptions: structs.QueryOptions{ + Region: "global", + }, + } + var allocsResp structs.NodeAllocsResponse + err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp) + must.NoError(t, err) + must.Len(t, 1, allocsResp.Allocs) - // Restart heartbeat to reconnect node. - heartbeatCtx, cancelHeartbeat = context.WithCancel(context.Background()) - defer cancelHeartbeat() - go heartbeat(heartbeatCtx) - - // Wait a few heartbeats and check that the node is still initializing. - // - // The heartbeat should not update the node to ready until it updates its - // allocs status with the server so the scheduler have the necessary - // information to avoid unnecessary placements. - time.Sleep(3 * heartbeatTTL) - testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusInit) - - // Get allocs that node should run. - // The node should only have one alloc assigned until it updates its allocs - // status with the server. - allocsReq = &structs.NodeSpecificRequest{ - NodeID: node.ID, - QueryOptions: structs.QueryOptions{ - Region: "global", - }, - } - err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp) - must.NoError(t, err) - must.Len(t, 1, allocsResp.Allocs) - - // Tell server the alloc is still running. - err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp) - must.NoError(t, err) - - // The client must end in the same state as before it disconnected: - // - client status is ready. - // - only 1 alloc and the alloc is running. - // - all evals are terminal, so cluster is in a stable state. - testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusReady) - testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ - structs.AllocClientStatusRunning: 1, - }) - testutil.WaitForResult(func() (bool, error) { - state := s.fsm.State() - ws := memdb.NewWatchSet() - evals, err := state.EvalsByJob(ws, job.Namespace, job.ID) - if err != nil { - return false, fmt.Errorf("failed to read evals: %v", err) - } - for _, eval := range evals { - // TODO: remove this check once the disconnect process stops - // leaking a max-disconnect-timeout eval. - // https://github.com/hashicorp/nomad/issues/12809 - if eval.TriggeredBy == structs.EvalTriggerMaxDisconnectTimeout { - continue - } - - if !eval.TerminalStatus() { - return false, fmt.Errorf("found %s eval", eval.Status) - } - } - return true, nil - }, func(err error) { - must.NoError(t, err) - }) + // Tell server the alloc is running. + // Save the alloc so we can reuse the request later. + alloc := allocsResp.Allocs[0].Copy() + alloc.ClientStatus = structs.AllocClientStatusRunning - }) + allocUpdateReq := &structs.AllocUpdateRequest{ + Alloc: []*structs.Allocation{alloc}, + WriteRequest: structs.WriteRequest{ + Region: "global", + }, + } + var resp structs.GenericResponse + err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp) + must.NoError(t, err) + + // Wait for alloc to be running in the server. + testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ + structs.AllocClientStatusRunning: 1, + }) + + // Stop heartbeat and wait for the client to be disconnected and the alloc + // to be unknown. + cancelHeartbeat() + testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusDisconnected) + testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{ + structs.AllocClientStatusUnknown: 1, + }) + + // Restart heartbeat to reconnect node. 
+	heartbeatCtx, cancelHeartbeat = context.WithCancel(context.Background())
+	defer cancelHeartbeat()
+	go heartbeat(heartbeatCtx)
+
+	// Wait a few heartbeats and check that the node is still initializing.
+	//
+	// The heartbeat should not update the node to ready until it updates its
+	// allocs status with the server so the scheduler has the necessary
+	// information to avoid unnecessary placements.
+	time.Sleep(3 * heartbeatTTL)
+	testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusInit)
+
+	// Get allocs that node should run.
+	// The node should only have one alloc assigned until it updates its allocs
+	// status with the server.
+	allocsReq = &structs.NodeSpecificRequest{
+		NodeID: node.ID,
+		QueryOptions: structs.QueryOptions{
+			Region: "global",
+		},
 	}
+	err = msgpackrpc.CallWithCodec(codec, "Node.GetAllocs", allocsReq, &allocsResp)
+	must.NoError(t, err)
+	must.Len(t, 1, allocsResp.Allocs)
+
+	// Tell server the alloc is still running.
+	err = msgpackrpc.CallWithCodec(codec, "Node.UpdateAlloc", allocUpdateReq, &resp)
+	must.NoError(t, err)
+
+	// The client must end in the same state as before it disconnected:
+	// - client status is ready.
+	// - only 1 alloc and the alloc is running.
+	// - all evals are terminal, so the cluster is in a stable state.
+	testutil.WaitForClientStatus(t, s.RPC, node.ID, "global", structs.NodeStatusReady)
+	testutil.WaitForJobAllocStatus(t, s.RPC, job, map[string]int{
+		structs.AllocClientStatusRunning: 1,
+	})
+	testutil.WaitForResult(func() (bool, error) {
+		state := s.fsm.State()
+		ws := memdb.NewWatchSet()
+		evals, err := state.EvalsByJob(ws, job.Namespace, job.ID)
+		if err != nil {
+			return false, fmt.Errorf("failed to read evals: %v", err)
+		}
+		for _, eval := range evals {
+			// TODO: remove this check once the disconnect process stops
+			// leaking a max-disconnect-timeout eval.
+ // https://github.com/hashicorp/nomad/issues/12809 + if eval.TriggeredBy == structs.EvalTriggerMaxDisconnectTimeout { + continue + } + + if !eval.TerminalStatus() { + return false, fmt.Errorf("found %s eval", eval.Status) + } + } + return true, nil + }, func(err error) { + must.NoError(t, err) + }) } func TestClientEndpoint_UpdateStatus_HeartbeatRecovery(t *testing.T) { diff --git a/nomad/node_pool_endpoint_test.go b/nomad/node_pool_endpoint_test.go index 9443e2aa8239..f3b0efc6a882 100644 --- a/nomad/node_pool_endpoint_test.go +++ b/nomad/node_pool_endpoint_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp/cmpopts" "github.com/hashicorp/go-memdb" "github.com/hashicorp/go-set/v2" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/operator_endpoint.go b/nomad/operator_endpoint.go index 511d51d4bc6b..de85ca10521d 100644 --- a/nomad/operator_endpoint.go +++ b/nomad/operator_endpoint.go @@ -13,7 +13,7 @@ import ( "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-memdb" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" version "github.com/hashicorp/go-version" "github.com/hashicorp/raft" "github.com/hashicorp/serf/serf" diff --git a/nomad/operator_endpoint_test.go b/nomad/operator_endpoint_test.go index 1afcb44b2531..822f7757e4ac 100644 --- a/nomad/operator_endpoint_test.go +++ b/nomad/operator_endpoint_test.go @@ -14,13 +14,12 @@ import ( "path" "reflect" "strings" - "sync" "testing" "time" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-msgpack/v2/codec" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + "github.com/hashicorp/go-msgpack/codec" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" cstructs "github.com/hashicorp/nomad/client/structs" @@ -379,10 +378,10 @@ func TestOperator_RaftRemovePeerByID_ACL(t *testing.T) { type testcluster struct { t *testing.T - args tcArgs server []*Server cleanup []func() token *structs.ACLToken + rpc func(string, any, any) error } func (tc testcluster) Cleanup() { @@ -407,7 +406,6 @@ func newTestCluster(t *testing.T, args tcArgs) (tc testcluster) { cSize := args.size out := testcluster{ t: t, - args: args, server: make([]*Server, cSize), cleanup: make([]func(), cSize), } @@ -421,46 +419,26 @@ func newTestCluster(t *testing.T, args tcArgs) (tc testcluster) { }) } t.Cleanup(out.Cleanup) + out.rpc = out.server[0].RPC TestJoin(t, out.server...) 
out.WaitForLeader() if args.enableACL { - s1 := out.server[0] - bsToken := new(structs.ACLToken) // Bootstrap the ACL subsystem - req := &structs.ACLTokenBootstrapRequest{ - Token: bsToken, - WriteRequest: structs.WriteRequest{Region: s1.config.Region}, - } - resp := &structs.ACLTokenUpsertResponse{} - err := out.server[0].RPC("ACL.Bootstrap", req, resp) + token := mock.ACLManagementToken() + err := out.server[0].State().BootstrapACLTokens(structs.MsgTypeTestSetup, 1, 0, token) if err != nil { t.Fatalf("failed to bootstrap ACL token: %v", err) } - t.Logf("bootstrap token: %v", *resp.Tokens[0]) - out.token = resp.Tokens[0] + t.Logf("bootstrap token: %v", *token) + out.token = token } return out } -// WaitForLeader performs a parallel WaitForLeader over each cluster member, -// because testutil doesn't export rpcFn so we can't create a collection of -// rpcFn to use testutil.WaitForLeaders directly. func (tc testcluster) WaitForLeader() { - var wg sync.WaitGroup - for i := 0; i < len(tc.server); i++ { - idx := i - wg.Add(1) - go func() { - defer wg.Done() - - // The WaitForLeader func uses WaitForResultRetries - // so this should timeout at 5 seconds * test multiplier - testutil.WaitForLeader(tc.t, tc.server[idx].RPC) - }() - } - wg.Wait() + testutil.WaitForLeader(tc.t, tc.rpc) } func (tc testcluster) leader() *Server { @@ -472,119 +450,70 @@ func (tc testcluster) leader() *Server { } return nil } -func (tc testcluster) anyFollowerRaftServerID() raft.ServerID { - tc.WaitForLeader() - s1 := tc.server[0] - _, ldrID := s1.raft.LeaderWithID() - var tgtID raft.ServerID - - s1.peerLock.Lock() - defer s1.peerLock.Unlock() - - // Find the first non-leader server in the list. - for _, sp := range s1.localPeers { - tgtID = raft.ServerID(sp.ID) - if tgtID != ldrID { - break - } +func (tc testcluster) anyFollower() *Server { + if len(tc.server) < 2 { + return nil } - return tgtID -} -func (tc testcluster) anyFollowerRaftServerAddress() raft.ServerAddress { - tc.WaitForLeader() - s1 := tc.server[0] - lAddr, _ := s1.raft.LeaderWithID() - var addr raft.ServerAddress - - s1.peerLock.Lock() - defer s1.peerLock.Unlock() - - // Find the first non-leader server in the list. - for a := range s1.localPeers { - addr = a - if addr != lAddr { - break + testutil.WaitForLeader(tc.t, tc.rpc) + for _, s := range tc.server { + if isLeader, _ := s.getLeader(); !isLeader { + return s } } - return addr + // something weird happened. + return nil } func TestOperator_TransferLeadershipToServerAddress_ACL(t *testing.T) { ci.Parallel(t) - var err error tc := newTestCluster(t, tcArgs{enableACL: true}) s1 := tc.leader() - must.NotNil(t, s1) codec := rpcClient(t, s1) + state := s1.fsm.State() - addr := tc.anyFollowerRaftServerAddress() - - mgmtWR := structs.WriteRequest{ - Region: s1.config.Region, - AuthToken: tc.token.SecretID, - } - - // Create invalid ACL Token - pReq := &structs.ACLPolicyUpsertRequest{ - Policies: []*structs.ACLPolicy{ - { - Name: "node-write-only", - Rules: `node { policy = "write" }`, - }, - }, - WriteRequest: mgmtWR, - } - pResp := &structs.GenericResponse{} - err = msgpackrpc.CallWithCodec(codec, structs.ACLUpsertPoliciesRPCMethod, pReq, pResp) - must.NoError(t, err) + lAddr, _ := s1.raft.LeaderWithID() - tReq := &structs.ACLTokenUpsertRequest{ - Tokens: []*structs.ACLToken{ - { - Name: "invalid", - Policies: []string{"node_write_only"}, - Type: structs.ACLClientToken, - }, - }, - WriteRequest: mgmtWR, + var addr raft.ServerAddress + // Find the first non-leader server in the list. 
+ for a := range s1.localPeers { + addr = a + if addr != lAddr { + break + } } - tResp := &structs.ACLTokenUpsertResponse{} - err = msgpackrpc.CallWithCodec(codec, structs.ACLUpsertTokensRPCMethod, tReq, tResp) - must.NoError(t, err) - invalidToken := tResp.Tokens[0] + // Create ACL token + invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid", mock.NodePolicy(acl.PolicyWrite)) - testReq := &structs.RaftPeerRequest{ + arg := &structs.RaftPeerRequest{ RaftIDAddress: structs.RaftIDAddress{Address: addr}, - WriteRequest: structs.WriteRequest{ - Region: s1.config.Region, - }, + WriteRequest: structs.WriteRequest{Region: s1.config.Region}, } var reply struct{} t.Run("no-token", func(t *testing.T) { // Try with no token and expect permission denied - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.Error(t, err) must.ErrorIs(t, err, rpcPermDeniedErr) }) t.Run("invalid-token", func(t *testing.T) { // Try with an invalid token and expect permission denied - testReq.AuthToken = invalidToken.SecretID - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + arg.AuthToken = invalidToken.SecretID + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.Error(t, err) must.ErrorIs(t, err, rpcPermDeniedErr) }) t.Run("good-token", func(t *testing.T) { // Try with a management token - testReq.AuthToken = tc.token.SecretID - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + arg.AuthToken = tc.token.SecretID + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.NoError(t, err) // Is the expected leader the new one? @@ -596,76 +525,55 @@ func TestOperator_TransferLeadershipToServerAddress_ACL(t *testing.T) { func TestOperator_TransferLeadershipToServerID_ACL(t *testing.T) { ci.Parallel(t) - var err error - tc := newTestCluster(t, tcArgs{enableACL: true}) s1 := tc.leader() - must.NotNil(t, s1) codec := rpcClient(t, s1) + state := s1.fsm.State() - mgmtWR := structs.WriteRequest{ - Region: s1.config.Region, - AuthToken: tc.token.SecretID, - } - - // Create invalid ACL Token - pReq := &structs.ACLPolicyUpsertRequest{ - Policies: []*structs.ACLPolicy{ - { - Name: "node-write-only", - Rules: `node { policy = "write" }`, - }, - }, - WriteRequest: mgmtWR, - } - pResp := &structs.GenericResponse{} - err = msgpackrpc.CallWithCodec(codec, structs.ACLUpsertPoliciesRPCMethod, pReq, pResp) - must.NoError(t, err) + _, ldrID := s1.raft.LeaderWithID() - tReq := &structs.ACLTokenUpsertRequest{ - Tokens: []*structs.ACLToken{ - { - Name: "invalid", - Policies: []string{"node_write_only"}, - Type: structs.ACLClientToken, - }, - }, - WriteRequest: mgmtWR, + var tgtID raft.ServerID + // Find the first non-leader server in the list. 
+ s1.peerLock.Lock() + for _, sp := range s1.localPeers { + tgtID = raft.ServerID(sp.ID) + if tgtID != ldrID { + break + } } - tResp := &structs.ACLTokenUpsertResponse{} - err = msgpackrpc.CallWithCodec(codec, structs.ACLUpsertTokensRPCMethod, tReq, tResp) - must.NoError(t, err) + s1.peerLock.Unlock() - invalidToken := tResp.Tokens[0] + // Create ACL token + invalidToken := mock.CreatePolicyAndToken(t, state, 1001, "test-invalid", mock.NodePolicy(acl.PolicyWrite)) - tgtID := tc.anyFollowerRaftServerID() - testReq := &structs.RaftPeerRequest{ + arg := &structs.RaftPeerRequest{ RaftIDAddress: structs.RaftIDAddress{ ID: tgtID, }, WriteRequest: structs.WriteRequest{Region: s1.config.Region}, } + var reply struct{} t.Run("no-token", func(t *testing.T) { // Try with no token and expect permission denied - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.Error(t, err) must.ErrorIs(t, err, rpcPermDeniedErr) }) t.Run("invalid-token", func(t *testing.T) { // Try with an invalid token and expect permission denied - testReq.AuthToken = invalidToken.SecretID - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + arg.AuthToken = invalidToken.SecretID + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.Error(t, err) must.ErrorIs(t, err, rpcPermDeniedErr) }) t.Run("good-token", func(t *testing.T) { // Try with a management token - testReq.AuthToken = tc.token.SecretID - err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", testReq, &reply) + arg.AuthToken = tc.token.SecretID + err := msgpackrpc.CallWithCodec(codec, "Operator.TransferLeadershipToPeer", arg, &reply) must.NoError(t, err) // Is the expected leader the new one? diff --git a/nomad/periodic_endpoint_test.go b/nomad/periodic_endpoint_test.go index b862690b1952..d19cfdec4195 100644 --- a/nomad/periodic_endpoint_test.go +++ b/nomad/periodic_endpoint_test.go @@ -7,7 +7,7 @@ import ( "testing" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/plan_apply.go b/nomad/plan_apply.go index 57a177f7300a..40237182cc40 100644 --- a/nomad/plan_apply.go +++ b/nomad/plan_apply.go @@ -801,7 +801,7 @@ func isValidForDisconnectedNode(plan *structs.Plan, nodeID string) bool { // as non reschedulables when lost or if the allocs are being updated to lost. 
 func isValidForDownNode(plan *structs.Plan, nodeID string) bool {
 	for _, alloc := range plan.NodeAllocation[nodeID] {
-		if !(alloc.ClientStatus == structs.AllocClientStatusUnknown && alloc.PreventRescheduleOnDisconnect()) &&
+		if !(alloc.ClientStatus == structs.AllocClientStatusUnknown && alloc.PreventRescheduleOnLost()) &&
 			(alloc.ClientStatus != structs.AllocClientStatusLost) {
 			return false
 		}
diff --git a/nomad/plan_endpoint_test.go b/nomad/plan_endpoint_test.go
index 5c39df0d2b03..6800792e8c07 100644
--- a/nomad/plan_endpoint_test.go
+++ b/nomad/plan_endpoint_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
diff --git a/nomad/plan_normalization_test.go b/nomad/plan_normalization_test.go
index b167d452bac4..ffa23a5be1b5 100644
--- a/nomad/plan_normalization_test.go
+++ b/nomad/plan_normalization_test.go
@@ -8,7 +8,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
diff --git a/nomad/regions_endpoint_test.go b/nomad/regions_endpoint_test.go
index da997fda67ea..7a74d20df09a 100644
--- a/nomad/regions_endpoint_test.go
+++ b/nomad/regions_endpoint_test.go
@@ -7,7 +7,7 @@ import (
 	"fmt"
 	"testing"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/testutil"
diff --git a/nomad/rpc.go b/nomad/rpc.go
index b32cbfaab283..f8dcfc0a4a8b 100644
--- a/nomad/rpc.go
+++ b/nomad/rpc.go
@@ -21,7 +21,7 @@ import (
 	"github.com/hashicorp/go-connlimit"
 	log "github.com/hashicorp/go-hclog"
 	memdb "github.com/hashicorp/go-memdb"
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/nomad/helper"
 	"github.com/hashicorp/nomad/helper/pool"
 	"github.com/hashicorp/nomad/nomad/state"
diff --git a/nomad/rpc_test.go b/nomad/rpc_test.go
index 47c33d288833..a33c69b698df 100644
--- a/nomad/rpc_test.go
+++ b/nomad/rpc_test.go
@@ -19,9 +19,9 @@ import (
 	"testing"
 	"time"
 
-	"github.com/hashicorp/go-msgpack/v2/codec"
+	"github.com/hashicorp/go-msgpack/codec"
 	"github.com/hashicorp/go-sockaddr"
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/ci"
 	cstructs "github.com/hashicorp/nomad/client/structs"
 	"github.com/hashicorp/nomad/helper/pool"
diff --git a/nomad/scaling_endpoint_test.go b/nomad/scaling_endpoint_test.go
index 67a7e59061e6..242ad08117b7 100644
--- a/nomad/scaling_endpoint_test.go
+++ b/nomad/scaling_endpoint_test.go
@@ -7,7 +7,7 @@ import (
 	"testing"
 	"time"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
 	"github.com/hashicorp/nomad/acl"
 	"github.com/hashicorp/nomad/ci"
 	"github.com/hashicorp/nomad/helper/uuid"
diff --git a/nomad/search_endpoint_test.go b/nomad/search_endpoint_test.go
index e06688ac927f..38804668323a 100644
--- a/nomad/search_endpoint_test.go
+++ b/nomad/search_endpoint_test.go
@@ -10,7 +10,7 @@ import (
 	"strings"
 	"testing"
 
-	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2"
+	msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc"
"github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/server.go b/nomad/server.go index a488481d997b..09c5e74a3031 100644 --- a/nomad/server.go +++ b/nomad/server.go @@ -1391,19 +1391,8 @@ func (s *Server) setupRaft() error { } // Create a transport layer - logger := log.New(&log.LoggerOptions{ - Name: "raft-net", - Output: s.config.LogOutput, - Level: log.DefaultLevel, - }) - netConfig := &raft.NetworkTransportConfig{ - Stream: s.raftLayer, - MaxPool: 3, - Timeout: s.config.RaftTimeout, - Logger: logger, - MsgpackUseNewTimeFormat: true, - } - trans := raft.NewNetworkTransportWithConfig(netConfig) + trans := raft.NewNetworkTransport(s.raftLayer, 3, s.config.RaftTimeout, + s.config.LogOutput) s.raftTransport = trans // Make sure we set the Logger. @@ -1453,7 +1442,6 @@ func (s *Server) setupRaft() error { BoltOptions: &bbolt.Options{ NoFreelistSync: s.config.RaftBoltNoFreelistSync, }, - MsgpackUseNewTimeFormat: true, }) if raftErr != nil { return raftErr diff --git a/nomad/server_test.go b/nomad/server_test.go index a8358582bc67..343c44ad6c20 100644 --- a/nomad/server_test.go +++ b/nomad/server_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/testlog" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/service_registration_endpoint_test.go b/nomad/service_registration_endpoint_test.go index 0dff59f03263..175cc83932a9 100644 --- a/nomad/service_registration_endpoint_test.go +++ b/nomad/service_registration_endpoint_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/status_endpoint_test.go b/nomad/status_endpoint_test.go index 1168e9e220a1..0747692fbfbc 100644 --- a/nomad/status_endpoint_test.go +++ b/nomad/status_endpoint_test.go @@ -6,7 +6,7 @@ package nomad import ( "testing" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/helper/uuid" diff --git a/nomad/stream/ndjson.go b/nomad/stream/ndjson.go index 7b24e093fbc8..45287476c0fb 100644 --- a/nomad/stream/ndjson.go +++ b/nomad/stream/ndjson.go @@ -9,7 +9,7 @@ import ( "fmt" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/structs/alloc_test.go b/nomad/structs/alloc_test.go index f38f01917c59..46efb5e991d4 100644 --- a/nomad/structs/alloc_test.go +++ b/nomad/structs/alloc_test.go @@ -5,16 +5,13 @@ package structs import ( "testing" - "time" - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestAllocServiceRegistrationsRequest_StaleReadSupport(t *testing.T) { req := &AllocServiceRegistrationsRequest{} - must.True(t, req.IsRead()) + require.True(t, req.IsRead()) } func Test_Allocation_ServiceProviderNamespace(t *testing.T) { @@ -142,501 +139,7 @@ func Test_Allocation_ServiceProviderNamespace(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t 
*testing.T) { actualOutput := tc.inputAllocation.ServiceProviderNamespace() - must.Eq(t, tc.expectedOutput, actualOutput) - }) - } -} - -// Test using stop_after_client_disconnect, remove after its deprecated in favor -// of Disconnect.StopOnClientAfter introduced in 1.8.0. -func TestAllocation_WaitClientStop(t *testing.T) { - ci.Parallel(t) - type testCase struct { - desc string - stop time.Duration - status string - expectedShould bool - expectedRescheduleTime time.Time - } - now := time.Now().UTC() - testCases := []testCase{ - { - desc: "running", - stop: 2 * time.Second, - status: AllocClientStatusRunning, - expectedShould: true, - }, - { - desc: "no stop_after_client_disconnect", - status: AllocClientStatusLost, - expectedShould: false, - }, - { - desc: "stop", - status: AllocClientStatusLost, - stop: 2 * time.Second, - expectedShould: true, - expectedRescheduleTime: now.Add((2 + 5) * time.Second), - }, - } - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - j := testJob() - a := &Allocation{ - ClientStatus: tc.status, - Job: j, - TaskStates: map[string]*TaskState{}, - } - - if tc.status == AllocClientStatusLost { - a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) - } - - j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop - a.TaskGroup = j.TaskGroups[0].Name - - must.Eq(t, tc.expectedShould, a.ShouldClientStop()) - - if !tc.expectedShould || tc.status != AllocClientStatusLost { - return - } - - // the reschedTime is close to the expectedRescheduleTime - reschedTime := a.WaitClientStop() - e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() - must.Less(t, int64(2), e) - }) - } -} - -func TestAllocation_WaitClientStop_Disconnect(t *testing.T) { - ci.Parallel(t) - type testCase struct { - desc string - stop time.Duration - status string - expectedShould bool - expectedRescheduleTime time.Time - } - now := time.Now().UTC() - testCases := []testCase{ - { - desc: "running", - stop: 2 * time.Second, - status: AllocClientStatusRunning, - expectedShould: true, - }, - { - desc: "no stop_after_client_disconnect", - status: AllocClientStatusLost, - expectedShould: false, - }, - { - desc: "stop", - status: AllocClientStatusLost, - stop: 2 * time.Second, - expectedShould: true, - expectedRescheduleTime: now.Add((2 + 5) * time.Second), - }, - } - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - j := testJob() - a := &Allocation{ - ClientStatus: tc.status, - Job: j, - TaskStates: map[string]*TaskState{}, - } - - if tc.status == AllocClientStatusLost { - a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) - } - - j.TaskGroups[0].Disconnect = &DisconnectStrategy{ - StopOnClientAfter: &tc.stop, - } - - a.TaskGroup = j.TaskGroups[0].Name - - must.Eq(t, tc.expectedShould, a.ShouldClientStop()) - - if !tc.expectedShould || tc.status != AllocClientStatusLost { - return - } - - // the reschedTime is close to the expectedRescheduleTime - reschedTime := a.WaitClientStop() - e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() - must.Less(t, int64(2), e) - }) - } -} - -func TestAllocation_Timeout_Disconnect(t *testing.T) { - type testCase struct { - desc string - maxDisconnect time.Duration - } - - testCases := []testCase{ - { - desc: "has lost_after", - maxDisconnect: 30 * time.Second, - }, - { - desc: "zero lost_after", - maxDisconnect: 0 * time.Second, - }, - } - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - j := testJob() - a := &Allocation{ - Job: j, - } - - j.TaskGroups[0].Disconnect = 
&DisconnectStrategy{ - LostAfter: tc.maxDisconnect, - } - - a.TaskGroup = j.TaskGroups[0].Name - - now := time.Now() - - reschedTime := a.DisconnectTimeout(now) - - if tc.maxDisconnect == 0 { - must.Equal(t, now, reschedTime, must.Sprint("expected to be now")) - } else { - difference := reschedTime.Sub(now) - must.Eq(t, tc.maxDisconnect, difference, must.Sprint("expected durations to be equal")) - } - - }) - } -} - -// Test using max_client_disconnect, remove after its deprecated in favor -// of Disconnect.LostAfter introduced in 1.8.0. -func TestAllocation_DisconnectTimeout(t *testing.T) { - type testCase struct { - desc string - maxDisconnect *time.Duration - } - - testCases := []testCase{ - { - desc: "no max_client_disconnect", - maxDisconnect: nil, - }, - { - desc: "has max_client_disconnect", - maxDisconnect: pointer.Of(30 * time.Second), - }, - { - desc: "zero max_client_disconnect", - maxDisconnect: pointer.Of(0 * time.Second), - }, - } - - for _, tc := range testCases { - t.Run(tc.desc, func(t *testing.T) { - j := testJob() - a := &Allocation{ - Job: j, - } - - j.TaskGroups[0].MaxClientDisconnect = tc.maxDisconnect - a.TaskGroup = j.TaskGroups[0].Name - - now := time.Now() - - reschedTime := a.DisconnectTimeout(now) - - if tc.maxDisconnect == nil { - must.Equal(t, now, reschedTime, must.Sprint("expected to be now")) - } else { - difference := reschedTime.Sub(now) - must.Eq(t, *tc.maxDisconnect, difference, must.Sprint("expected durations to be equal")) - } - }) - } -} - -// Test using max_client_disconnect, remove after its deprecated in favor -// of Disconnect.LostAfter introduced in 1.8.0. -func TestAllocation_Expired(t *testing.T) { - type testCase struct { - name string - maxDisconnect string - ellapsed int - expected bool - nilJob bool - badTaskGroup bool - mixedUTC bool - noReconnectEvent bool - status string - } - - testCases := []testCase{ - { - name: "has-expired", - maxDisconnect: "5s", - ellapsed: 10, - expected: true, - }, - { - name: "has-not-expired", - maxDisconnect: "5s", - ellapsed: 3, - expected: false, - }, - { - name: "are-equal", - maxDisconnect: "5s", - ellapsed: 5, - expected: true, - }, - { - name: "nil-job", - maxDisconnect: "5s", - ellapsed: 10, - expected: false, - nilJob: true, - }, - { - name: "wrong-status", - maxDisconnect: "5s", - ellapsed: 10, - expected: false, - status: AllocClientStatusRunning, - }, - { - name: "bad-task-group", - maxDisconnect: "", - badTaskGroup: true, - ellapsed: 10, - expected: false, - }, - { - name: "no-max-disconnect", - maxDisconnect: "", - ellapsed: 10, - expected: false, - }, - { - name: "mixed-utc-has-expired", - maxDisconnect: "5s", - ellapsed: 10, - mixedUTC: true, - expected: true, - }, - { - name: "mixed-utc-has-not-expired", - maxDisconnect: "5s", - ellapsed: 3, - mixedUTC: true, - expected: false, - }, - { - name: "no-reconnect-event", - maxDisconnect: "5s", - ellapsed: 2, - expected: false, - noReconnectEvent: true, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - alloc := MockAlloc() - var err error - var maxDisconnect time.Duration - - if tc.maxDisconnect != "" { - maxDisconnect, err = time.ParseDuration(tc.maxDisconnect) - must.NoError(t, err) - alloc.Job.TaskGroups[0].MaxClientDisconnect = &maxDisconnect - } - - if tc.nilJob { - alloc.Job = nil - } - - if tc.badTaskGroup { - alloc.TaskGroup = "bad" - } - - alloc.ClientStatus = AllocClientStatusUnknown - if tc.status != "" { - alloc.ClientStatus = tc.status - } - - alloc.AllocStates = []*AllocState{{ - Field: 
AllocStateFieldClientStatus, - Value: AllocClientStatusUnknown, - Time: time.Now(), - }} - - must.NoError(t, err) - now := time.Now().UTC() - if tc.mixedUTC { - now = time.Now() - } - - if !tc.noReconnectEvent { - event := NewTaskEvent(TaskClientReconnected) - event.Time = now.UnixNano() - - alloc.TaskStates = map[string]*TaskState{ - "web": { - Events: []*TaskEvent{event}, - }, - } - } - - ellapsedDuration := time.Duration(tc.ellapsed) * time.Second - now = now.Add(ellapsedDuration) - - must.Eq(t, tc.expected, alloc.Expired(now)) - }) - } -} - -func TestAllocation_Expired_Disconnected(t *testing.T) { - type testCase struct { - name string - maxDisconnect string - ellapsed int - expected bool - nilJob bool - badTaskGroup bool - mixedUTC bool - noReconnectEvent bool - status string - } - - testCases := []testCase{ - { - name: "has-expired", - maxDisconnect: "5s", - ellapsed: 10, - expected: true, - }, - { - name: "has-not-expired", - maxDisconnect: "5s", - ellapsed: 3, - expected: false, - }, - { - name: "are-equal", - maxDisconnect: "5s", - ellapsed: 5, - expected: true, - }, - { - name: "nil-job", - maxDisconnect: "5s", - ellapsed: 10, - expected: false, - nilJob: true, - }, - { - name: "wrong-status", - maxDisconnect: "5s", - ellapsed: 10, - expected: false, - status: AllocClientStatusRunning, - }, - { - name: "bad-task-group", - maxDisconnect: "", - badTaskGroup: true, - ellapsed: 10, - expected: false, - }, - { - name: "no-max-disconnect", - maxDisconnect: "", - ellapsed: 10, - expected: false, - }, - { - name: "mixed-utc-has-expired", - maxDisconnect: "5s", - ellapsed: 10, - mixedUTC: true, - expected: true, - }, - { - name: "mixed-utc-has-not-expired", - maxDisconnect: "5s", - ellapsed: 3, - mixedUTC: true, - expected: false, - }, - { - name: "no-reconnect-event", - maxDisconnect: "5s", - ellapsed: 2, - expected: false, - noReconnectEvent: true, - }, - } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - alloc := MockAlloc() - var err error - var maxDisconnect time.Duration - - if tc.maxDisconnect != "" { - maxDisconnect, err = time.ParseDuration(tc.maxDisconnect) - must.NoError(t, err) - alloc.Job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - LostAfter: maxDisconnect, - } - } - - if tc.nilJob { - alloc.Job = nil - } - - if tc.badTaskGroup { - alloc.TaskGroup = "bad" - } - - alloc.ClientStatus = AllocClientStatusUnknown - if tc.status != "" { - alloc.ClientStatus = tc.status - } - - alloc.AllocStates = []*AllocState{{ - Field: AllocStateFieldClientStatus, - Value: AllocClientStatusUnknown, - Time: time.Now(), - }} - - must.NoError(t, err) - now := time.Now().UTC() - if tc.mixedUTC { - now = time.Now() - } - - if !tc.noReconnectEvent { - event := NewTaskEvent(TaskClientReconnected) - event.Time = now.UnixNano() - - alloc.TaskStates = map[string]*TaskState{ - "web": { - Events: []*TaskEvent{event}, - }, - } - } - - ellapsedDuration := time.Duration(tc.ellapsed) * time.Second - now = now.Add(ellapsedDuration) - - must.Eq(t, tc.expected, alloc.Expired(now)) + require.Equal(t, tc.expectedOutput, actualOutput) }) } } diff --git a/nomad/structs/config/artifact_test.go b/nomad/structs/config/artifact_test.go index 144893f3dbc6..f78feecb8664 100644 --- a/nomad/structs/config/artifact_test.go +++ b/nomad/structs/config/artifact_test.go @@ -418,7 +418,8 @@ func TestArtifactConfig_Validate(t *testing.T) { err := a.Validate() if tc.expErr != "" { - must.ErrorContains(t, err, tc.expErr) + must.Error(t, err) + must.StrContains(t, err.Error(), tc.expErr) } else { 
must.NoError(t, err) } diff --git a/nomad/structs/config/users.go b/nomad/structs/config/users.go deleted file mode 100644 index 8d204f35830c..000000000000 --- a/nomad/structs/config/users.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package config - -import ( - "errors" - - "github.com/hashicorp/nomad/helper/pointer" -) - -// UsersConfig configures things related to operating system users. -type UsersConfig struct { - // MinDynamicUser is the lowest uid/gid for use in the dynamic users pool. - MinDynamicUser *int `hcl:"dynamic_user_min"` - - // MaxDynamicUser is the highest uid/gid for use in the dynamic users pool. - MaxDynamicUser *int `hcl:"dynamic_user_max"` -} - -// Copy returns a deep copy of the Users struct. -func (u *UsersConfig) Copy() *UsersConfig { - if u == nil { - return nil - } - return &UsersConfig{ - MinDynamicUser: pointer.Copy(u.MinDynamicUser), - MaxDynamicUser: pointer.Copy(u.MaxDynamicUser), - } -} - -// Merge returns a new Users where non-empty/nil fields in the argument have -// higher precedence. -func (u *UsersConfig) Merge(o *UsersConfig) *UsersConfig { - switch { - case u == nil: - return o.Copy() - case o == nil: - return u.Copy() - default: - return &UsersConfig{ - MinDynamicUser: pointer.Merge(u.MinDynamicUser, o.MinDynamicUser), - MaxDynamicUser: pointer.Merge(u.MaxDynamicUser, o.MaxDynamicUser), - } - } -} - -// Equal returns whether u and o are the same. -func (u *UsersConfig) Equal(o *UsersConfig) bool { - if u == nil || o == nil { - return u == o - } - switch { - case !pointer.Eq(u.MinDynamicUser, o.MinDynamicUser): - return false - case !pointer.Eq(u.MaxDynamicUser, o.MaxDynamicUser): - return false - default: - return true - } -} - -var ( - errUsersUnset = errors.New("users must not be nil") - errDynamicUserMinUnset = errors.New("dynamic_user_min must be set") - errDynamicUserMinInvalid = errors.New("dynamic_user_min must not be negative") - errDynamicUserMaxUnset = errors.New("dynamic_user_max must be set") - errDynamicUserMaxInvalid = errors.New("dynamic_user_max must not be negative") -) - -// Validate whether UsersConfig is valid. -// -// Note that -1 is a valid value for min/max dynamic users, as this is used -// to indicate the dynamic workload users feature should be disabled. -func (u *UsersConfig) Validate() error { - if u == nil { - return errUsersUnset - } - if u.MinDynamicUser == nil { - return errDynamicUserMinUnset - } - if *u.MinDynamicUser < -1 { - return errDynamicUserMinInvalid - } - if u.MaxDynamicUser == nil { - return errDynamicUserMaxUnset - } - if *u.MaxDynamicUser < -1 { - return errDynamicUserMaxInvalid - } - return nil -} - -// DefaultUsersConfig returns the default users configuration. -func DefaultUsersConfig() *UsersConfig { - return &UsersConfig{ - MinDynamicUser: pointer.Of(80_000), - MaxDynamicUser: pointer.Of(89_999), - } -} diff --git a/nomad/structs/config/users_test.go b/nomad/structs/config/users_test.go deleted file mode 100644 index aed036b3e6a8..000000000000 --- a/nomad/structs/config/users_test.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package config - -import ( - "testing" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" -) - -func TestUsersConfig_Copy(t *testing.T) { - ci.Parallel(t) - - a := DefaultUsersConfig() - b := a.Copy() - must.Equal(t, a, b) - must.Equal(t, b, a) - - a.MaxDynamicUser = pointer.Of(1000) - must.NotEqual(t, a, b) - must.NotEqual(t, b, a) -} - -func TestUsersConfig_Merge(t *testing.T) { - ci.Parallel(t) - - cases := []struct { - name string - source *UsersConfig - other *UsersConfig - exp *UsersConfig - }{ - { - name: "merge all fields", - source: &UsersConfig{ - MinDynamicUser: pointer.Of(100), - MaxDynamicUser: pointer.Of(200), - }, - other: &UsersConfig{ - MinDynamicUser: pointer.Of(3000), - MaxDynamicUser: pointer.Of(4000), - }, - exp: &UsersConfig{ - MinDynamicUser: pointer.Of(3000), - MaxDynamicUser: pointer.Of(4000), - }, - }, - { - name: "null source", - source: nil, - other: &UsersConfig{ - MinDynamicUser: pointer.Of(100), - MaxDynamicUser: pointer.Of(200), - }, - exp: &UsersConfig{ - MinDynamicUser: pointer.Of(100), - MaxDynamicUser: pointer.Of(200), - }, - }, - { - name: "null other", - other: nil, - source: &UsersConfig{ - MinDynamicUser: pointer.Of(100), - MaxDynamicUser: pointer.Of(200), - }, - exp: &UsersConfig{ - MinDynamicUser: pointer.Of(100), - MaxDynamicUser: pointer.Of(200), - }, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - got := tc.source.Merge(tc.other) - must.Equal(t, tc.exp, got) - }) - } -} - -func TestUsersConfig_Validate(t *testing.T) { - ci.Parallel(t) - - // default config should be valid of course - must.NoError(t, DefaultUsersConfig().Validate()) - - // nil config is not valid - must.ErrorIs(t, ((*UsersConfig)(nil)).Validate(), errUsersUnset) - - cases := []struct { - name string - modify func(*UsersConfig) - exp error - }{ - { - name: "min dynamic user not set", - modify: func(u *UsersConfig) { - u.MinDynamicUser = nil - }, - exp: errDynamicUserMinUnset, - }, - { - name: "min dynamic user not valid", - modify: func(u *UsersConfig) { - u.MinDynamicUser = pointer.Of(-2) - }, - exp: errDynamicUserMinInvalid, - }, - { - name: "max dynamic user not set", - modify: func(u *UsersConfig) { - u.MaxDynamicUser = nil - }, - exp: errDynamicUserMaxUnset, - }, - { - name: "max dynamic user not valid", - modify: func(u *UsersConfig) { - u.MaxDynamicUser = pointer.Of(-2) - }, - exp: errDynamicUserMaxInvalid, - }, - } - - for _, tc := range cases { - t.Run(tc.name, func(t *testing.T) { - u := DefaultUsersConfig() - if tc.modify != nil { - tc.modify(u) - } - err := u.Validate() - must.ErrorIs(t, err, tc.exp) - }) - } -} diff --git a/nomad/structs/diff.go b/nomad/structs/diff.go index 1b6bbed671f4..5d116b22bdd9 100644 --- a/nomad/structs/diff.go +++ b/nomad/structs/diff.go @@ -338,11 +338,6 @@ func (tg *TaskGroup) Diff(other *TaskGroup, contextual bool) (*TaskGroupDiff, er diff.Objects = append(diff.Objects, uDiff) } - // Disconnect diff - if disconnectDiff := disconectStrategyDiffs(tg.Disconnect, other.Disconnect, contextual); disconnectDiff != nil { - diff.Objects = append(diff.Objects, disconnectDiff) - } - // Network Resources diff if nDiffs := networkResourceDiffs(tg.Networks, other.Networks, contextual); nDiffs != nil { diff.Objects = append(diff.Objects, nDiffs...) 
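The call removed above appended an ObjectDiff named "Disconnect" produced by the disconectStrategyDiffs helper, which the next hunk deletes. A rough sketch of how a caller observed that object before this change, assuming the TaskGroupDiff/ObjectDiff shapes used throughout this file (the diff_test.go hunks below show the exact expected structures):

	old := &TaskGroup{Name: "web", Disconnect: &DisconnectStrategy{LostAfter: time.Second}}
	updated := &TaskGroup{Name: "web"}
	d, err := old.Diff(updated, false)
	if err == nil {
		for _, obj := range d.Objects {
			if obj.Name == "Disconnect" {
				// obj.Type was DiffTypeDeleted; obj.Fields carried the
				// LostAfter, Reconcile, Replace, and StopOnClientAfter diffs.
			}
		}
	}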
@@ -2488,30 +2483,6 @@ func (d *DNSConfig) Diff(other *DNSConfig, contextual bool) *ObjectDiff { return diff } -func disconectStrategyDiffs(old, new *DisconnectStrategy, contextual bool) *ObjectDiff { - diff := &ObjectDiff{Type: DiffTypeNone, Name: "Disconnect"} - var oldDisconnectFlat, newDisconnectFlat map[string]string - - if reflect.DeepEqual(old, new) { - return nil - } else if old == nil { - diff.Type = DiffTypeAdded - newDisconnectFlat = flatmap.Flatten(new, nil, false) - } else if new == nil { - diff.Type = DiffTypeDeleted - oldDisconnectFlat = flatmap.Flatten(old, nil, false) - } else { - diff.Type = DiffTypeEdited - oldDisconnectFlat = flatmap.Flatten(old, nil, false) - newDisconnectFlat = flatmap.Flatten(new, nil, false) - } - - // Diff the primitive fields. - diff.Fields = fieldDiffs(oldDisconnectFlat, newDisconnectFlat, contextual) - - return diff -} - // networkResourceDiffs diffs a set of NetworkResources. If contextual diff is enabled, // non-changed fields will still be returned. func networkResourceDiffs(old, new []*NetworkResource, contextual bool) []*ObjectDiff { diff --git a/nomad/structs/diff_test.go b/nomad/structs/diff_test.go index cef7736881e4..5e9dca389fac 100644 --- a/nomad/structs/diff_test.go +++ b/nomad/structs/diff_test.go @@ -2970,209 +2970,6 @@ func TestTaskGroupDiff(t *testing.T) { }, }, }, - { - TestCase: "Disconnect strategy deleted", - Old: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: 1 * time.Second, - Replace: pointer.Of(true), - Reconcile: ReconcileOptionLongestRunning, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - }, - New: &TaskGroup{}, - Expected: &TaskGroupDiff{ - Type: DiffTypeEdited, - Objects: []*ObjectDiff{ - { - Type: DiffTypeDeleted, - Name: "Disconnect", - Fields: []*FieldDiff{ - { - Type: DiffTypeDeleted, - Name: "LostAfter", - Old: "1000000000", - New: "", - }, - { - Type: DiffTypeDeleted, - Name: "Reconcile", - Old: ReconcileOptionLongestRunning, - New: "", - }, - { - Type: DiffTypeDeleted, - Name: "Replace", - Old: "true", - New: "", - }, - { - Type: DiffTypeDeleted, - Name: "StopOnClientAfter", - Old: "1000000000", - New: "", - }, - }, - }, - }, - }, - }, - { - TestCase: "Disconnect strategy added", - Old: &TaskGroup{}, - New: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: time.Second, - Replace: pointer.Of(true), - Reconcile: ReconcileOptionLongestRunning, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - }, - Expected: &TaskGroupDiff{ - Type: DiffTypeEdited, - Objects: []*ObjectDiff{ - { - Type: DiffTypeAdded, - Name: "Disconnect", - Fields: []*FieldDiff{ - { - Type: DiffTypeAdded, - Name: "LostAfter", - Old: "", - New: "1000000000", - }, - { - Type: DiffTypeAdded, - Name: "Reconcile", - Old: "", - New: ReconcileOptionLongestRunning, - }, - { - Type: DiffTypeAdded, - Name: "Replace", - Old: "", - New: "true", - }, - { - Type: DiffTypeAdded, - Name: "StopOnClientAfter", - Old: "", - New: "1000000000", - }, - }, - }, - }, - }, - }, - { - TestCase: "Disconnect strategy edited", - Old: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: time.Second, - Replace: pointer.Of(false), - Reconcile: ReconcileOptionLongestRunning, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - }, - New: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: time.Minute, - Replace: pointer.Of(true), - Reconcile: ReconcileOptionBestScore, - StopOnClientAfter: pointer.Of(1 * time.Minute), - }, - }, - Expected: &TaskGroupDiff{ - Type: DiffTypeEdited, - Objects: []*ObjectDiff{ - { - Type: 
DiffTypeEdited, - Name: "Disconnect", - Fields: []*FieldDiff{ - { - Type: DiffTypeEdited, - Name: "LostAfter", - Old: "1000000000", - New: "60000000000", - }, - { - Type: DiffTypeEdited, - Name: "Reconcile", - Old: ReconcileOptionLongestRunning, - New: ReconcileOptionBestScore, - }, - { - Type: DiffTypeEdited, - Name: "Replace", - Old: "false", - New: "true", - }, - { - Type: DiffTypeEdited, - Name: "StopOnClientAfter", - Old: "1000000000", - New: "60000000000", - }, - }, - }, - }, - }, - }, - { - TestCase: "Disconnect strategy edited with context", - Contextual: true, - Old: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: time.Second, - Replace: pointer.Of(false), - Reconcile: ReconcileOptionLongestRunning, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - }, - New: &TaskGroup{ - Disconnect: &DisconnectStrategy{ - LostAfter: time.Minute, - Replace: pointer.Of(true), - Reconcile: ReconcileOptionBestScore, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - }, - Expected: &TaskGroupDiff{ - Type: DiffTypeEdited, - Objects: []*ObjectDiff{ - { - Type: DiffTypeEdited, - Name: "Disconnect", - Fields: []*FieldDiff{ - { - Type: DiffTypeEdited, - Name: "LostAfter", - Old: "1000000000", - New: "60000000000", - }, - { - Type: DiffTypeEdited, - Name: "Reconcile", - Old: ReconcileOptionLongestRunning, - New: ReconcileOptionBestScore, - }, - { - Type: DiffTypeEdited, - Name: "Replace", - Old: "false", - New: "true", - }, - { - Type: DiffTypeNone, - Name: "StopOnClientAfter", - Old: "1000000000", - New: "1000000000", - }, - }, - }, - }, - }, - }, { TestCase: "EphemeralDisk added", Old: &TaskGroup{}, @@ -5631,12 +5428,6 @@ func TestTaskDiff(t *testing.T) { Old: "", New: "user2", }, - { - Type: DiffTypeAdded, - Name: "GetterInsecure", - Old: "", - New: "false", - }, { Type: DiffTypeAdded, Name: "GetterMode", @@ -5673,13 +5464,6 @@ func TestTaskDiff(t *testing.T) { Old: "user1", New: "", }, - - { - Type: DiffTypeDeleted, - Name: "GetterInsecure", - Old: "false", - New: "", - }, { Type: DiffTypeDeleted, Name: "GetterMode", diff --git a/nomad/structs/encoding.go b/nomad/structs/encoding.go index a5ad0702b918..4ed3ecf4d314 100644 --- a/nomad/structs/encoding.go +++ b/nomad/structs/encoding.go @@ -6,7 +6,7 @@ package structs import ( "reflect" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" ) // extendFunc is a mapping from one struct to another, to change the shape of the encoded JSON diff --git a/nomad/structs/generate.sh b/nomad/structs/generate.sh index 17bddbdd598a..3d2135ecd932 100755 --- a/nomad/structs/generate.sh +++ b/nomad/structs/generate.sh @@ -6,7 +6,7 @@ set -e FILES="$(ls ./*.go | grep -v -e _test.go -e .generated.go | tr '\n' ' ')" codecgen \ - -c github.com/hashicorp/go-msgpack/v2/codec \ + -c github.com/hashicorp/go-msgpack/codec \ -st codec \ -d 100 \ -t codegen_generated \ diff --git a/nomad/structs/group.go b/nomad/structs/group.go deleted file mode 100644 index 8aef826a5302..000000000000 --- a/nomad/structs/group.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "errors" - "fmt" - "time" - - "github.com/hashicorp/go-multierror" - "github.com/hashicorp/nomad/helper/pointer" -) - -const ( - // ReconcileOption is used to specify the behavior of the reconciliation process - // between the original allocations and the replacements when a previously - // disconnected client comes back online. 
- ReconcileOptionKeepOriginal = "keep_original" - ReconcileOptionKeepReplacement = "keep_replacement" - ReconcileOptionBestScore = "best_score" - ReconcileOptionLongestRunning = "longest_running" -) - -var ( - // Disconnect strategy validation errors - errStopAndLost = errors.New("Disconnect cannot be configured with both lost_after and stop_after") - errNegativeLostAfter = errors.New("lost_after cannot be a negative duration") - errNegativeStopAfter = errors.New("stop_after cannot be a negative duration") - errStopAfterNonService = errors.New("stop_after can only be used with service or batch job types") - errInvalidReconcile = errors.New("reconcile option is invalid") -) - -func NewDefaultDisconnectStrategy() *DisconnectStrategy { - return &DisconnectStrategy{ - Replace: pointer.Of(true), - Reconcile: ReconcileOptionBestScore, - } -} - -// Disconnect strategy defines how both clients and server should behave in case of -// disconnection between them. -type DisconnectStrategy struct { - // Defines for how long the server will consider the unresponsive node as - // disconnected but alive instead of lost. - LostAfter time.Duration `mapstructure:"lost_after" hcl:"lost_after,optional"` - - // Defines for how long a disconnected client will keep its allocations running. - // This option has a different behavior for nil, the default, and time.Duration(0), - // and needs to be intentionally set/unset. - StopOnClientAfter *time.Duration `mapstructure:"stop_on_client_after" hcl:"stop_on_client_after,optional"` - - // A boolean field used to define if the allocations should be replaced while - // its considered disconnected. - // This option has a different behavior for nil, the default, and false, - // and needs to be intentionally set/unset. It needs to be set to true - // for compatibility. - Replace *bool `mapstructure:"replace" hcl:"replace,optional"` - - // Once the disconnected node starts reporting again, it will define which - // instances to keep: the original allocations, the replacement, the one - // running on the node with the best score as it is currently implemented, - // or the allocation that has been running continuously the longest. 
- Reconcile string `mapstructure:"reconcile" hcl:"reconcile,optional"` -} - -func (ds *DisconnectStrategy) Validate(job *Job) error { - if ds == nil { - return nil - } - - var mErr *multierror.Error - - if ds.StopOnClientAfter != nil { - if *ds.StopOnClientAfter < 0 { - mErr = multierror.Append(mErr, errNegativeStopAfter) - } - - if job.Type != JobTypeService && job.Type != JobTypeBatch { - mErr = multierror.Append(mErr, errStopAfterNonService) - } - } - - if ds.LostAfter < 0 { - mErr = multierror.Append(mErr, errNegativeLostAfter) - } - - if ds.StopOnClientAfter != nil && ds.LostAfter != 0 { - mErr = multierror.Append(mErr, errStopAndLost) - } - - switch ds.Reconcile { - case "", ReconcileOptionBestScore, ReconcileOptionLongestRunning, - ReconcileOptionKeepOriginal, ReconcileOptionKeepReplacement: - default: - mErr = multierror.Append(mErr, fmt.Errorf("%w: %s", errInvalidReconcile, ds.Reconcile)) - } - - return mErr.ErrorOrNil() -} - -func (ds *DisconnectStrategy) Copy() *DisconnectStrategy { - if ds == nil { - return nil - } - - nds := new(DisconnectStrategy) - *nds = *ds - - if ds.StopOnClientAfter != nil { - nds.StopOnClientAfter = pointer.Of(*ds.StopOnClientAfter) - } - - if ds.Replace != nil { - nds.Replace = pointer.Of(*ds.Replace) - } - - return nds -} - -func (ds *DisconnectStrategy) Canonicalize() { - if ds.Replace == nil { - ds.Replace = pointer.Of(true) - } - - if ds.Reconcile == "" { - ds.Reconcile = ReconcileOptionBestScore - } -} - -// ReconcileStrategy returns the strategy to be used when reconciling allocations -// after a client reconnects. Best score is the default one. -func (ds *DisconnectStrategy) ReconcileStrategy() string { - if ds == nil || ds.Reconcile == "" { - return ReconcileOptionBestScore - } - - return ds.Reconcile -} diff --git a/nomad/structs/group_test.go b/nomad/structs/group_test.go deleted file mode 100644 index de40644c2662..000000000000 --- a/nomad/structs/group_test.go +++ /dev/null @@ -1,275 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: BUSL-1.1 - -package structs - -import ( - "errors" - "fmt" - "testing" - "time" - - "github.com/hashicorp/nomad/ci" - "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" -) - -func TestJobConfig_Validate_LostAfter_Disconnect(t *testing.T) { - // Set up a job with an invalid Disconnect.LostAfter value - job := testJob() - timeout := -1 * time.Minute - job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - LostAfter: timeout, - StopOnClientAfter: &timeout, - } - - err := job.Validate() - must.Error(t, err) - err = errors.Unwrap(err) - - must.StrContains(t, err.Error(), errNegativeLostAfter.Error()) - must.StrContains(t, err.Error(), errNegativeStopAfter.Error()) - must.StrContains(t, err.Error(), errStopAndLost.Error()) - - // Modify the job with a valid Disconnect.LostAfter value - timeout = 1 * time.Minute - job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - LostAfter: timeout, - StopOnClientAfter: nil, - } - err = job.Validate() - must.NoError(t, err) -} - -func TestDisconnectStrategy_Validate(t *testing.T) { - ci.Parallel(t) - - cases := []struct { - name string - strategy *DisconnectStrategy - jobType string - err error - }{ - { - name: "negative-stop-after", - strategy: &DisconnectStrategy{ - StopOnClientAfter: pointer.Of(-1 * time.Second), - }, - jobType: JobTypeService, - err: errNegativeStopAfter, - }, - { - name: "stop-after-on-system", - strategy: &DisconnectStrategy{ - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - jobType: JobTypeSystem, - err: errStopAfterNonService, - }, - { - name: "negative-lost-after", - strategy: &DisconnectStrategy{ - LostAfter: -1 * time.Second, - }, - jobType: JobTypeService, - err: errNegativeLostAfter, - }, - { - name: "lost-after-and-stop-after-enabled", - strategy: &DisconnectStrategy{ - LostAfter: 1 * time.Second, - StopOnClientAfter: pointer.Of(1 * time.Second), - }, - jobType: JobTypeService, - err: errStopAndLost, - }, - { - name: "invalid-reconcile", - strategy: &DisconnectStrategy{ - LostAfter: 1 * time.Second, - Reconcile: "invalid", - }, - jobType: JobTypeService, - err: errInvalidReconcile, - }, - { - name: "valid-configuration", - strategy: &DisconnectStrategy{ - LostAfter: 1 * time.Second, - Reconcile: ReconcileOptionKeepOriginal, - Replace: pointer.Of(true), - StopOnClientAfter: nil, - }, - jobType: JobTypeService, - err: nil, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - job := testJob() - job.Type = c.jobType - err := c.strategy.Validate(job) - if !errors.Is(err, c.err) { - t.Errorf("expected error %v, got %v", c.err, err) - } - }) - } -} - -func TestReconcileStrategy(t *testing.T) { - ci.Parallel(t) - - cases := []struct { - name string - disconnectBlock *DisconnectStrategy - expected string - }{ - { - name: "nil_disconnect_default_to_best_score", - disconnectBlock: nil, - expected: ReconcileOptionBestScore, - }, - { - name: "empty_reconcile_default_to_best_score", - disconnectBlock: &DisconnectStrategy{}, - expected: ReconcileOptionBestScore, - }, - { - name: "longest_running", - disconnectBlock: &DisconnectStrategy{ - Reconcile: ReconcileOptionLongestRunning, - }, - expected: ReconcileOptionLongestRunning, - }, - } - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - rs := c.disconnectBlock.ReconcileStrategy() - must.Eq(t, c.expected, rs) - }) - } -} - -func TestJobConfig_Validate_StopAfterClient_Disconnect(t *testing.T) { - ci.Parallel(t) - // Setup a system Job with Disconnect.StopOnClientAfter set, which is invalid - 
job := testJob() - job.Type = JobTypeSystem - stop := 1 * time.Minute - job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - StopOnClientAfter: &stop, - } - - err := job.Validate() - must.Error(t, err) - must.StrContains(t, err.Error(), errStopAfterNonService.Error()) - - // Modify the job to a batch job with an invalid Disconnect.StopOnClientAfter value - job.Type = JobTypeBatch - invalid := -1 * time.Minute - job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - StopOnClientAfter: &invalid, - } - - err = job.Validate() - must.Error(t, err) - must.StrContains(t, err.Error(), errNegativeStopAfter.Error()) - - // Modify the job to a batch job with a valid Disconnect.StopOnClientAfter value - job.Type = JobTypeBatch - job.TaskGroups[0].Disconnect = &DisconnectStrategy{ - StopOnClientAfter: &stop, - } - err = job.Validate() - must.NoError(t, err) -} - -// Test using stop_after_client_disconnect, remove after its deprecated in favor -// of Disconnect.StopOnClientAfter introduced in 1.8.0. -func TestJobConfig_Validate_StopAfterClientDisconnect(t *testing.T) { - ci.Parallel(t) - // Setup a system Job with stop_after_client_disconnect set, which is invalid - job := testJob() - job.Type = JobTypeSystem - stop := 1 * time.Minute - job.TaskGroups[0].StopAfterClientDisconnect = &stop - - err := job.Validate() - must.Error(t, err) - must.StrContains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs") - - // Modify the job to a batch job with an invalid stop_after_client_disconnect value - job.Type = JobTypeBatch - invalid := -1 * time.Minute - job.TaskGroups[0].StopAfterClientDisconnect = &invalid - - err = job.Validate() - must.Error(t, err) - must.StrContains(t, err.Error(), "stop_after_client_disconnect must be a positive value") - - // Modify the job to a batch job with a valid stop_after_client_disconnect value - job.Type = JobTypeBatch - job.TaskGroups[0].StopAfterClientDisconnect = &stop - err = job.Validate() - must.NoError(t, err) -} - -func TestJob_Validate_DisconnectRescheduleLost(t *testing.T) { - ci.Parallel(t) - - // Craft our speciality jobspec to test this particular use-case. - testDisconnectRescheduleLostJob := &Job{ - ID: "gh19644", - Name: "gh19644", - Region: "global", - Type: JobTypeSystem, - TaskGroups: []*TaskGroup{ - { - Name: "cache", - Disconnect: &DisconnectStrategy{ - LostAfter: 1 * time.Hour, - Replace: pointer.Of(false), - }, - Tasks: []*Task{ - { - Name: "redis", - Driver: "docker", - Config: map[string]interface{}{ - "image": "redis:7", - }, - LogConfig: DefaultLogConfig(), - }, - }, - }, - }, - } - - testDisconnectRescheduleLostJob.Canonicalize() - - must.NoError(t, testDisconnectRescheduleLostJob.Validate()) -} - -// Test using max_client_disconnect, remove after its deprecated in favor -// of Disconnect.LostAfter introduced in 1.8.0. 
-func TestJobConfig_Validate_MaxClientDisconnect(t *testing.T) { - // Set up a job with an invalid max_client_disconnect value - job := testJob() - timeout := -1 * time.Minute - job.TaskGroups[0].MaxClientDisconnect = &timeout - job.TaskGroups[0].StopAfterClientDisconnect = &timeout - - err := job.Validate() - must.Error(t, errors.Unwrap(err)) - fmt.Println("what?", err.Error(), "what?") - must.StrContains(t, err.Error(), "max_client_disconnect cannot be negative") - must.StrContains(t, err.Error(), "Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect") - - // Modify the job with a valid max_client_disconnect value - timeout = 1 * time.Minute - job.TaskGroups[0].MaxClientDisconnect = &timeout - job.TaskGroups[0].StopAfterClientDisconnect = nil - err = job.Validate() - must.NoError(t, err) -} diff --git a/nomad/structs/handlers.go b/nomad/structs/handlers.go index ae0f85e78383..bc370af184a4 100644 --- a/nomad/structs/handlers.go +++ b/nomad/structs/handlers.go @@ -4,7 +4,7 @@ package structs import ( - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" ) var ( diff --git a/nomad/structs/structs.go b/nomad/structs/structs.go index 8e1cc49a703e..47c81910c77c 100644 --- a/nomad/structs/structs.go +++ b/nomad/structs/structs.go @@ -31,7 +31,7 @@ import ( jwt "github.com/go-jose/go-jose/v3/jwt" "github.com/hashicorp/cronexpr" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-set/v2" "github.com/hashicorp/go-version" @@ -4596,7 +4596,6 @@ func (j *Job) Validate() error { } else if strings.Contains(j.Name, "\000") { mErr.Errors = append(mErr.Errors, errors.New("Job Name contains a null character")) } - if j.Namespace == "" { mErr.Errors = append(mErr.Errors, errors.New("Job must be in a namespace")) } @@ -6614,10 +6613,6 @@ type TaskGroup struct { // RestartPolicy of a TaskGroup RestartPolicy *RestartPolicy - // Disconnect strategy defines how both clients and server should behave in case of - // disconnection between them. - Disconnect *DisconnectStrategy - // Tasks are the collection of tasks that this task group needs to run Tasks []*Task @@ -6659,18 +6654,14 @@ type TaskGroup struct { // StopAfterClientDisconnect, if set, configures the client to stop the task group // after this duration since the last known good heartbeat - // To be deprecated after 1.8.0 infavor of Disconnect.StopOnClientAfter StopAfterClientDisconnect *time.Duration // MaxClientDisconnect, if set, configures the client to allow placed // allocations for tasks in this group to attempt to resume running without a restart. - // To be deprecated after 1.8.0 infavor of Disconnect.LostAfter MaxClientDisconnect *time.Duration // PreventRescheduleOnLost is used to signal that an allocation should not // be rescheduled if its node goes down or is disconnected. 
- // To be deprecated after 1.8.0 - // To be deprecated after 1.8.0 infavor of Disconnect.Replace PreventRescheduleOnLost bool } @@ -6683,7 +6674,6 @@ func (tg *TaskGroup) Copy() *TaskGroup { ntg.Update = ntg.Update.Copy() ntg.Constraints = CopySliceConstraints(ntg.Constraints) ntg.RestartPolicy = ntg.RestartPolicy.Copy() - ntg.Disconnect = ntg.Disconnect.Copy() ntg.ReschedulePolicy = ntg.ReschedulePolicy.Copy() ntg.Affinities = CopySliceAffinities(ntg.Affinities) ntg.Spreads = CopySliceSpreads(ntg.Spreads) @@ -6753,22 +6743,6 @@ func (tg *TaskGroup) Canonicalize(job *Job) { tg.ReschedulePolicy = NewReschedulePolicy(job.Type) } - if tg.Disconnect != nil { - tg.Disconnect.Canonicalize() - - if tg.MaxClientDisconnect != nil && tg.Disconnect.LostAfter == 0 { - tg.Disconnect.LostAfter = *tg.MaxClientDisconnect - } - - if tg.StopAfterClientDisconnect != nil && tg.Disconnect.StopOnClientAfter == nil { - tg.Disconnect.StopOnClientAfter = tg.StopAfterClientDisconnect - } - - if tg.PreventRescheduleOnLost && tg.Disconnect.Replace == nil { - tg.Disconnect.Replace = pointer.Of(false) - } - } - // Canonicalize Migrate for service jobs if job.Type == JobTypeService && tg.Migrate == nil { tg.Migrate = DefaultMigrateStrategy() @@ -6833,109 +6807,88 @@ func (tg *TaskGroup) filterServices(f func(s *Service) bool) []*Service { // Validate is used to check a task group for reasonable configuration func (tg *TaskGroup) Validate(j *Job) error { - var mErr *multierror.Error - + var mErr multierror.Error if tg.Name == "" { - mErr = multierror.Append(mErr, errors.New("Missing task group name")) + mErr.Errors = append(mErr.Errors, errors.New("Missing task group name")) } else if strings.Contains(tg.Name, "\000") { - mErr = multierror.Append(mErr, errors.New("Task group name contains null character")) + mErr.Errors = append(mErr.Errors, errors.New("Task group name contains null character")) } - if tg.Count < 0 { - mErr = multierror.Append(mErr, errors.New("Task group count can't be negative")) + mErr.Errors = append(mErr.Errors, errors.New("Task group count can't be negative")) } - if len(tg.Tasks) == 0 { // could be a lone consul gateway inserted by the connect mutator - mErr = multierror.Append(mErr, errors.New("Missing tasks for task group")) + mErr.Errors = append(mErr.Errors, errors.New("Missing tasks for task group")) } if tg.MaxClientDisconnect != nil && tg.StopAfterClientDisconnect != nil { - mErr = multierror.Append(mErr, errors.New("Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect")) + mErr.Errors = append(mErr.Errors, errors.New("Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect")) } if tg.MaxClientDisconnect != nil && *tg.MaxClientDisconnect < 0 { - mErr = multierror.Append(mErr, errors.New("max_client_disconnect cannot be negative")) - } - - if tg.Disconnect != nil { - if tg.MaxClientDisconnect != nil && tg.Disconnect.LostAfter > 0 { - return multierror.Append(mErr, errors.New("using both lost_after and max_client_disconnect is not allowed")) - } - - if tg.StopAfterClientDisconnect != nil && tg.Disconnect.StopOnClientAfter != nil { - return multierror.Append(mErr, errors.New("using both stop_after_client_disconnect and stop_on_client_after is not allowed")) - } - - if tg.PreventRescheduleOnLost && tg.Disconnect.Replace != nil { - return multierror.Append(mErr, errors.New("using both prevent_reschedule_on_lost and replace is not allowed")) - } - - if err := tg.Disconnect.Validate(j); err != nil { - mErr = 
multierror.Append(mErr, err) - } + mErr.Errors = append(mErr.Errors, errors.New("max_client_disconnect cannot be negative")) } for idx, constr := range tg.Constraints { if err := constr.Validate(); err != nil { outer := fmt.Errorf("Constraint %d validation failed: %s", idx+1, err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } } if j.Type == JobTypeSystem { if tg.Affinities != nil { - mErr = multierror.Append(mErr, fmt.Errorf("System jobs may not have an affinity block")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have an affinity block")) } } else { for idx, affinity := range tg.Affinities { if err := affinity.Validate(); err != nil { outer := fmt.Errorf("Affinity %d validation failed: %s", idx+1, err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } } } if tg.RestartPolicy != nil { if err := tg.RestartPolicy.Validate(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } } else { - mErr = multierror.Append(mErr, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a restart policy", tg.Name)) } if j.Type == JobTypeSystem { if tg.Spreads != nil { - mErr = multierror.Append(mErr, fmt.Errorf("System jobs may not have a spread block")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs may not have a spread block")) } } else { for idx, spread := range tg.Spreads { if err := spread.Validate(); err != nil { outer := fmt.Errorf("Spread %d validation failed: %s", idx+1, err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } } } if j.Type == JobTypeSystem { if tg.ReschedulePolicy != nil { - mErr = multierror.Append(mErr, fmt.Errorf("System jobs should not have a reschedule policy")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("System jobs should not have a reschedule policy")) } } else { if tg.ReschedulePolicy != nil { if err := tg.ReschedulePolicy.Validate(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } } else { - mErr = multierror.Append(mErr, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have a reschedule policy", tg.Name)) } } if tg.EphemeralDisk != nil { if err := tg.EphemeralDisk.Validate(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } } else { - mErr = multierror.Append(mErr, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task Group %v should have an ephemeral disk object", tg.Name)) } // Validate the update strategy @@ -6943,10 +6896,10 @@ func (tg *TaskGroup) Validate(j *Job) error { switch j.Type { case JobTypeService, JobTypeSystem: default: - mErr = multierror.Append(mErr, fmt.Errorf("Job type %q does not allow update block", j.Type)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow update block", j.Type)) } if err := u.Validate(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } } @@ -6955,12 +6908,12 @@ func (tg *TaskGroup) Validate(j *Job) error { case JobTypeService: if tg.Migrate != nil { if err := tg.Migrate.Validate(); err != nil { - mErr = multierror.Append(mErr, err) + mErr.Errors = append(mErr.Errors, err) } } default: if tg.Migrate != nil { - mErr = 
multierror.Append(mErr, fmt.Errorf("Job type %q does not allow migrate block", j.Type)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Job type %q does not allow migrate block", j.Type)) } } @@ -6969,9 +6922,9 @@ func (tg *TaskGroup) Validate(j *Job) error { leaderTasks := 0 for idx, task := range tg.Tasks { if task.Name == "" { - mErr = multierror.Append(mErr, fmt.Errorf("Task %d missing name", idx+1)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d missing name", idx+1)) } else if existing, ok := tasks[task.Name]; ok { - mErr = multierror.Append(mErr, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Task %d redefines '%s' from task %d", idx+1, task.Name, existing+1)) } else { tasks[task.Name] = idx } @@ -6982,7 +6935,7 @@ func (tg *TaskGroup) Validate(j *Job) error { } if leaderTasks > 1 { - mErr = multierror.Append(mErr, fmt.Errorf("Only one task may be marked as leader")) + mErr.Errors = append(mErr.Errors, fmt.Errorf("Only one task may be marked as leader")) } // Validate the volume requests @@ -6992,7 +6945,7 @@ func (tg *TaskGroup) Validate(j *Job) error { } for name, volReq := range tg.Volumes { if err := volReq.Validate(j.Type, tg.Count, canaries); err != nil { - mErr = multierror.Append(mErr, fmt.Errorf( + mErr.Errors = append(mErr.Errors, fmt.Errorf( "Task group volume validation for %s failed: %v", name, err)) } } @@ -7000,32 +6953,32 @@ func (tg *TaskGroup) Validate(j *Job) error { // Validate task group and task network resources if err := tg.validateNetworks(); err != nil { outer := fmt.Errorf("Task group network validation failed: %v", err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } // Validate task group and task services if err := tg.validateServices(); err != nil { outer := fmt.Errorf("Task group service validation failed: %v", err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } // Validate group service script-checks if err := tg.validateScriptChecksInGroupServices(); err != nil { outer := fmt.Errorf("Task group service check validation failed: %v", err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } // Validate the scaling policy if err := tg.validateScalingPolicy(j); err != nil { outer := fmt.Errorf("Task group scaling policy validation failed: %v", err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } // Validate the tasks for _, task := range tg.Tasks { if err := task.Validate(j.Type, tg); err != nil { outer := fmt.Errorf("Task %s validation failed: %v", task.Name, err) - mErr = multierror.Append(mErr, outer) + mErr.Errors = append(mErr.Errors, outer) } } @@ -7324,18 +7277,6 @@ func (tg *TaskGroup) Warnings(j *Job) error { } } - if tg.MaxClientDisconnect != nil { - mErr.Errors = append(mErr.Errors, errors.New("MaxClientDisconnect will be deprecated favor of Disconnect.LostAfter")) - } - - if tg.StopAfterClientDisconnect != nil { - mErr.Errors = append(mErr.Errors, errors.New("StopAfterClientDisconnect will be deprecated favor of Disconnect.StopOnClientAfter")) - } - - if tg.PreventRescheduleOnLost { - mErr.Errors = append(mErr.Errors, errors.New("PreventRescheduleOnLost will be deprecated favor of Disconnect.Replace")) - } - // Check for mbits network field if len(tg.Networks) > 0 && tg.Networks[0].MBits > 0 { mErr.Errors = append(mErr.Errors, fmt.Errorf("mbits has been deprecated as of Nomad 0.12.0. 
Please remove mbits from the network block")) @@ -7402,51 +7343,6 @@ func (tg *TaskGroup) GoString() string { return fmt.Sprintf("*%#v", *tg) } -// Replace is a helper meant to simplify the future depracation of -// PreventRescheduleOnLost in favor of Disconnect.Replace -// introduced in 1.8.0. -func (tg *TaskGroup) Replace() bool { - if tg.PreventRescheduleOnLost { - return false - } - - if tg.Disconnect == nil || tg.Disconnect.Replace == nil { - return true - } - - return *tg.Disconnect.Replace -} - -// GetDisconnectLostTimeout is a helper meant to simplify the future depracation of -// MaxClientDisconnect in favor of Disconnect.LostAfter -// introduced in 1.8.0. -func (tg *TaskGroup) GetDisconnectLostTimeout() time.Duration { - if tg.MaxClientDisconnect != nil { - return *tg.MaxClientDisconnect - } - - if tg.Disconnect != nil { - return tg.Disconnect.LostAfter - } - - return 0 -} - -// GetDisconnectStopTimeout is a helper meant to simplify the future depracation of -// StopAfterClientDisconnect in favor of Disconnect.StopOnClientAfter -// introduced in 1.8.0. -func (tg *TaskGroup) GetDisconnectStopTimeout() *time.Duration { - if tg.StopAfterClientDisconnect != nil { - return tg.StopAfterClientDisconnect - } - - if tg.Disconnect != nil && tg.Disconnect.StopOnClientAfter != nil { - return tg.Disconnect.StopOnClientAfter - } - - return nil -} - // CheckRestart describes if and when a task should be restarted based on // failing health checks. type CheckRestart struct { @@ -8035,12 +7931,15 @@ func (t *Task) Validate(jobType string, tg *TaskGroup) error { // Validation for volumes for idx, vm := range t.VolumeMounts { - if _, ok := tg.Volumes[vm.Volume]; !ok { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) references undefined volume %s", idx, vm.Volume)) + if !MountPropagationModeIsValid(vm.PropagationMode) { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) has an invalid propagation mode: \"%s\"", idx, vm.PropagationMode)) } - if err := vm.Validate(); err != nil { - mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) is invalid: \"%w\"", idx, err)) + // Validate the task does not reference undefined volume mounts + if vm.Volume == "" { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) references an empty volume", idx)) + } else if _, ok := tg.Volumes[vm.Volume]; !ok { + mErr.Errors = append(mErr.Errors, fmt.Errorf("Volume Mount (%d) references undefined volume %s", idx, vm.Volume)) } } @@ -9449,10 +9348,6 @@ type TaskArtifact struct { // Defaults to "any" but can be set to "file" or "dir". GetterMode string - // GetterInsecure is a flag to disable SSL certificate verification when - // downloading the artifact using go-getter. - GetterInsecure bool - // RelativeDest is the download destination given relative to the task's // directory. 
RelativeDest string @@ -9471,8 +9366,6 @@ func (ta *TaskArtifact) Equal(o *TaskArtifact) bool { return false case ta.GetterMode != o.GetterMode: return false - case ta.GetterInsecure != o.GetterInsecure: - return false case ta.RelativeDest != o.RelativeDest: return false } @@ -9484,12 +9377,11 @@ func (ta *TaskArtifact) Copy() *TaskArtifact { return nil } return &TaskArtifact{ - GetterSource: ta.GetterSource, - GetterOptions: maps.Clone(ta.GetterOptions), - GetterHeaders: maps.Clone(ta.GetterHeaders), - GetterMode: ta.GetterMode, - GetterInsecure: ta.GetterInsecure, - RelativeDest: ta.RelativeDest, + GetterSource: ta.GetterSource, + GetterOptions: maps.Clone(ta.GetterOptions), + GetterHeaders: maps.Clone(ta.GetterHeaders), + GetterMode: ta.GetterMode, + RelativeDest: ta.RelativeDest, } } @@ -9529,7 +9421,6 @@ func (ta *TaskArtifact) Hash() string { hashStringMap(h, ta.GetterHeaders) _, _ = h.Write([]byte(ta.GetterMode)) - _, _ = h.Write([]byte(strconv.FormatBool(ta.GetterInsecure))) _, _ = h.Write([]byte(ta.RelativeDest)) return base64.RawStdEncoding.EncodeToString(h.Sum(nil)) } @@ -11082,24 +10973,12 @@ func (a *Allocation) NextRescheduleTimeByTime(t time.Time) (time.Time, bool) { return a.nextRescheduleTime(t, reschedulePolicy) } -func (a *Allocation) RescheduleTimeOnDisconnect(now time.Time) (time.Time, bool) { - tg := a.Job.LookupTaskGroup(a.TaskGroup) - if tg == nil || tg.Disconnect == nil || tg.Disconnect.Replace == nil { - // Kept to maintain backwards compatibility with behavior prior to 1.8.0 - return a.NextRescheduleTimeByTime(now) - } - - return now, *tg.Disconnect.Replace -} - -// ShouldClientStop tests an alloc for StopAfterClient on the Disconnect configuration +// ShouldClientStop tests an alloc for StopAfterClientDisconnect configuration func (a *Allocation) ShouldClientStop() bool { tg := a.Job.LookupTaskGroup(a.TaskGroup) - timeout := tg.GetDisconnectStopTimeout() - if tg == nil || - timeout == nil || - *timeout == 0*time.Nanosecond { + tg.StopAfterClientDisconnect == nil || + *tg.StopAfterClientDisconnect == 0*time.Nanosecond { return false } return true @@ -11134,7 +11013,7 @@ func (a *Allocation) WaitClientStop() time.Time { } } - return t.Add(*tg.GetDisconnectStopTimeout() + kill) + return t.Add(*tg.StopAfterClientDisconnect + kill) } // DisconnectTimeout uses the MaxClientDisconnect to compute when the allocation @@ -11146,12 +11025,12 @@ func (a *Allocation) DisconnectTimeout(now time.Time) time.Time { tg := a.Job.LookupTaskGroup(a.TaskGroup) - timeout := tg.GetDisconnectLostTimeout() - if timeout == 0 { + timeout := tg.MaxClientDisconnect + if timeout == nil { return now } - return now.Add(timeout) + return now.Add(*timeout) } // SupportsDisconnectedClients determines whether both the server and the task group @@ -11165,7 +11044,7 @@ func (a *Allocation) SupportsDisconnectedClients(serverSupportsDisconnectedClien if a.Job != nil { tg := a.Job.LookupTaskGroup(a.TaskGroup) if tg != nil { - return tg.GetDisconnectLostTimeout() != 0 + return tg.MaxClientDisconnect != nil } } @@ -11173,14 +11052,12 @@ func (a *Allocation) SupportsDisconnectedClients(serverSupportsDisconnectedClien } // PreventRescheduleOnLost determines if an alloc allows to have a replacement -// when Disconnected. -func (a *Allocation) PreventRescheduleOnDisconnect() bool { +// when lost. 
+func (a *Allocation) PreventRescheduleOnLost() bool { if a.Job != nil { tg := a.Job.LookupTaskGroup(a.TaskGroup) if tg != nil { - return (tg.Disconnect != nil && tg.Disconnect.Replace != nil && - !*tg.Disconnect.Replace) || - tg.PreventRescheduleOnLost + return tg.PreventRescheduleOnLost } } @@ -11378,7 +11255,7 @@ func (a *Allocation) AllocationDiff() *AllocationDiff { return (*AllocationDiff)(a) } -// Expired determines whether an allocation has exceeded its Disconnect.LostAfter +// Expired determines whether an allocation has exceeded its MaxClientDisconnect // duration relative to the passed time stamp. func (a *Allocation) Expired(now time.Time) bool { if a == nil || a.Job == nil { @@ -11400,12 +11277,11 @@ func (a *Allocation) Expired(now time.Time) bool { return false } - timeout := tg.GetDisconnectLostTimeout() - if timeout == 0 && tg.Replace() { + if tg.MaxClientDisconnect == nil && !tg.PreventRescheduleOnLost { return false } - expiry := lastUnknown.Add(timeout) + expiry := lastUnknown.Add(*tg.MaxClientDisconnect) return expiry.Sub(now) <= 0 } @@ -11446,22 +11322,6 @@ func (a *Allocation) NeedsToReconnect() bool { return disconnected } -// LastStartOfTask returns the time of the last start event for the given task -// using the allocations TaskStates. If the task has not started, the zero time -// will be returned. -func (a *Allocation) LastStartOfTask(taskName string) time.Time { - task := a.TaskStates[taskName] - if task == nil { - return time.Time{} - } - - if task.Restarts > 0 { - return task.LastRestart - } - - return task.StartedAt -} - // IdentityClaims are the input to a JWT identifying a workload. It // should never be serialized to msgpack unsigned. type IdentityClaims struct { diff --git a/nomad/structs/structs_test.go b/nomad/structs/structs_test.go index 92253da18c44..05ff3081de42 100644 --- a/nomad/structs/structs_test.go +++ b/nomad/structs/structs_test.go @@ -470,6 +470,39 @@ func TestJob_ValidateNullChar(t *testing.T) { assert.Error(job.Validate(), "null character in task name should not validate") } +func TestJob_Validate_DisconnectRescheduleLost(t *testing.T) { + ci.Parallel(t) + + // Craft our speciality jobspec to test this particular use-case. 
+ testDisconnectRescheduleLostJob := &Job{ + ID: "gh19644", + Name: "gh19644", + Region: "global", + Type: JobTypeSystem, + TaskGroups: []*TaskGroup{ + { + Name: "cache", + MaxClientDisconnect: pointer.Of(1 * time.Hour), + PreventRescheduleOnLost: true, + Tasks: []*Task{ + { + Name: "redis", + Driver: "docker", + Config: map[string]interface{}{ + "image": "redis:7", + }, + LogConfig: DefaultLogConfig(), + }, + }, + }, + }, + } + + testDisconnectRescheduleLostJob.Canonicalize() + + must.NoError(t, testDisconnectRescheduleLostJob.Validate()) +} + func TestJob_Warnings(t *testing.T) { ci.Parallel(t) @@ -1592,7 +1625,7 @@ func TestTaskGroup_Validate(t *testing.T) { }, }, expErr: []string{ - `Volume Mount (0) references undefined volume`, + `Volume Mount (0) references an empty volume`, `Volume Mount (0) references undefined volume foob`, }, jobType: JobTypeService, @@ -2010,24 +2043,6 @@ func TestTask_Validate(t *testing.T) { t.Fatalf("err: %s", err) } - tg.Volumes = map[string]*VolumeRequest{ - "foo": { - Name: "foo", - }, - } - - task.VolumeMounts = []*VolumeMount{ - { - Volume: "blah", - }, - } - - err = task.Validate(JobTypeBatch, tg) - requireErrors(t, err, - "Volume Mount (0) references undefined volume blah", - ) - task.VolumeMounts = nil - task.Constraints = append(task.Constraints, &Constraint{ Operand: ConstraintDistinctHosts, @@ -4785,16 +4800,6 @@ func TestTaskArtifact_Hash(t *testing.T) { GetterMode: "g", RelativeDest: "i", }, - { - GetterSource: "b", - GetterOptions: map[string]string{ - "c": "c", - "d": "e", - }, - GetterMode: "g", - GetterInsecure: true, - RelativeDest: "i", - }, } // Map of hash to source @@ -5797,6 +5802,248 @@ func TestAllocation_NextDelay(t *testing.T) { } +func TestAllocation_WaitClientStop(t *testing.T) { + ci.Parallel(t) + type testCase struct { + desc string + stop time.Duration + status string + expectedShould bool + expectedRescheduleTime time.Time + } + now := time.Now().UTC() + testCases := []testCase{ + { + desc: "running", + stop: 2 * time.Second, + status: AllocClientStatusRunning, + expectedShould: true, + }, + { + desc: "no stop_after_client_disconnect", + status: AllocClientStatusLost, + expectedShould: false, + }, + { + desc: "stop", + status: AllocClientStatusLost, + stop: 2 * time.Second, + expectedShould: true, + expectedRescheduleTime: now.Add((2 + 5) * time.Second), + }, + } + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + j := testJob() + a := &Allocation{ + ClientStatus: tc.status, + Job: j, + TaskStates: map[string]*TaskState{}, + } + + if tc.status == AllocClientStatusLost { + a.AppendState(AllocStateFieldClientStatus, AllocClientStatusLost) + } + + j.TaskGroups[0].StopAfterClientDisconnect = &tc.stop + a.TaskGroup = j.TaskGroups[0].Name + + require.Equal(t, tc.expectedShould, a.ShouldClientStop()) + + if !tc.expectedShould || tc.status != AllocClientStatusLost { + return + } + + // the reschedTime is close to the expectedRescheduleTime + reschedTime := a.WaitClientStop() + e := reschedTime.Unix() - tc.expectedRescheduleTime.Unix() + require.Less(t, e, int64(2)) + }) + } +} + +func TestAllocation_DisconnectTimeout(t *testing.T) { + type testCase struct { + desc string + maxDisconnect *time.Duration + } + + testCases := []testCase{ + { + desc: "no max_client_disconnect", + maxDisconnect: nil, + }, + { + desc: "has max_client_disconnect", + maxDisconnect: pointer.Of(30 * time.Second), + }, + { + desc: "zero max_client_disconnect", + maxDisconnect: pointer.Of(0 * time.Second), + }, + } + for _, tc := range 
testCases { + t.Run(tc.desc, func(t *testing.T) { + j := testJob() + a := &Allocation{ + Job: j, + } + + j.TaskGroups[0].MaxClientDisconnect = tc.maxDisconnect + a.TaskGroup = j.TaskGroups[0].Name + + now := time.Now() + + reschedTime := a.DisconnectTimeout(now) + + if tc.maxDisconnect == nil { + require.Equal(t, now, reschedTime, "expected to be now") + } else { + difference := reschedTime.Sub(now) + require.Equal(t, *tc.maxDisconnect, difference, "expected durations to be equal") + } + + }) + } +} + +func TestAllocation_Expired(t *testing.T) { + type testCase struct { + name string + maxDisconnect string + ellapsed int + expected bool + nilJob bool + badTaskGroup bool + mixedUTC bool + noReconnectEvent bool + status string + } + + testCases := []testCase{ + { + name: "has-expired", + maxDisconnect: "5s", + ellapsed: 10, + expected: true, + }, + { + name: "has-not-expired", + maxDisconnect: "5s", + ellapsed: 3, + expected: false, + }, + { + name: "are-equal", + maxDisconnect: "5s", + ellapsed: 5, + expected: true, + }, + { + name: "nil-job", + maxDisconnect: "5s", + ellapsed: 10, + expected: false, + nilJob: true, + }, + { + name: "wrong-status", + maxDisconnect: "5s", + ellapsed: 10, + expected: false, + status: AllocClientStatusRunning, + }, + { + name: "bad-task-group", + maxDisconnect: "", + badTaskGroup: true, + ellapsed: 10, + expected: false, + }, + { + name: "no-max-disconnect", + maxDisconnect: "", + ellapsed: 10, + expected: false, + }, + { + name: "mixed-utc-has-expired", + maxDisconnect: "5s", + ellapsed: 10, + mixedUTC: true, + expected: true, + }, + { + name: "mixed-utc-has-not-expired", + maxDisconnect: "5s", + ellapsed: 3, + mixedUTC: true, + expected: false, + }, + { + name: "no-reconnect-event", + maxDisconnect: "5s", + ellapsed: 2, + expected: false, + noReconnectEvent: true, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + alloc := MockAlloc() + var err error + var maxDisconnect time.Duration + + if tc.maxDisconnect != "" { + maxDisconnect, err = time.ParseDuration(tc.maxDisconnect) + require.NoError(t, err) + alloc.Job.TaskGroups[0].MaxClientDisconnect = &maxDisconnect + } + + if tc.nilJob { + alloc.Job = nil + } + + if tc.badTaskGroup { + alloc.TaskGroup = "bad" + } + + alloc.ClientStatus = AllocClientStatusUnknown + if tc.status != "" { + alloc.ClientStatus = tc.status + } + + alloc.AllocStates = []*AllocState{{ + Field: AllocStateFieldClientStatus, + Value: AllocClientStatusUnknown, + Time: time.Now(), + }} + + require.NoError(t, err) + now := time.Now().UTC() + if tc.mixedUTC { + now = time.Now() + } + + if !tc.noReconnectEvent { + event := NewTaskEvent(TaskClientReconnected) + event.Time = now.UnixNano() + + alloc.TaskStates = map[string]*TaskState{ + "web": { + Events: []*TaskEvent{event}, + }, + } + } + + ellapsedDuration := time.Duration(tc.ellapsed) * time.Second + now = now.Add(ellapsedDuration) + + require.Equal(t, tc.expected, alloc.Expired(now)) + }) + } +} + func TestAllocation_NeedsToReconnect(t *testing.T) { ci.Parallel(t) @@ -5901,123 +6148,6 @@ func TestAllocation_NeedsToReconnect(t *testing.T) { } } -func TestAllocation_RescheduleTimeOnDisconnect(t *testing.T) { - ci.Parallel(t) - testNow := time.Now() - - testAlloc := MockAlloc() - - testCases := []struct { - name string - taskGroup string - disconnectGroup *DisconnectStrategy - expected bool - expectedTime time.Time - }{ - { - name: "missing_task_group", - taskGroup: "missing-task-group", - expected: false, - expectedTime: time.Time{}, - }, - { - name: 
"missing_disconnect_group", - taskGroup: "web", - disconnectGroup: nil, - expected: true, - expectedTime: testNow.Add(RestartPolicyMinInterval), // RestartPolicyMinInterval is the default value - }, - { - name: "empty_disconnect_group", - taskGroup: "web", - disconnectGroup: &DisconnectStrategy{}, - expected: true, - expectedTime: testNow.Add(RestartPolicyMinInterval), // RestartPolicyMinInterval is the default value - }, - { - name: "replace_enabled", - taskGroup: "web", - disconnectGroup: &DisconnectStrategy{ - Replace: pointer.Of(true), - }, - expected: true, - expectedTime: testNow, - }, - { - name: "replace_disabled", - taskGroup: "web", - disconnectGroup: &DisconnectStrategy{ - Replace: pointer.Of(false), - }, - expected: false, - expectedTime: testNow, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - alloc := testAlloc.Copy() - - alloc.TaskGroup = tc.taskGroup - alloc.Job.TaskGroups[0].Disconnect = tc.disconnectGroup - - time, eligible := alloc.RescheduleTimeOnDisconnect(testNow) - - must.Eq(t, tc.expected, eligible) - must.Eq(t, tc.expectedTime, time) - }) - } -} - -func TestAllocation_LastStartOfTask(t *testing.T) { - ci.Parallel(t) - testNow := time.Now() - - alloc := MockAlloc() - alloc.TaskStates = map[string]*TaskState{ - "task-with-restarts": { - StartedAt: testNow.Add(-30 * time.Minute), - Restarts: 3, - LastRestart: testNow.Add(-5 * time.Minute), - }, - "task-without-restarts": { - StartedAt: testNow.Add(-30 * time.Minute), - Restarts: 0, - }, - } - - testCases := []struct { - name string - taskName string - expected time.Time - }{ - { - name: "missing_task", - taskName: "missing-task", - expected: time.Time{}, - }, - { - name: "task_with_restarts", - taskName: "task-with-restarts", - expected: testNow.Add(-5 * time.Minute), - }, - { - name: "task_without_restarts", - taskName: "task-without-restarts", - expected: testNow.Add(-30 * time.Minute), - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - alloc.TaskGroup = "web" - got := alloc.LastStartOfTask(tc.taskName) - - must.Eq(t, tc.expected, got) - }) - } -} - func TestAllocation_Canonicalize_Old(t *testing.T) { ci.Parallel(t) @@ -6196,6 +6326,54 @@ func TestParameterizedJobConfig_Validate_NonBatch(t *testing.T) { } } +func TestJobConfig_Validate_StopAferClientDisconnect(t *testing.T) { + ci.Parallel(t) + // Setup a system Job with stop_after_client_disconnect set, which is invalid + job := testJob() + job.Type = JobTypeSystem + stop := 1 * time.Minute + job.TaskGroups[0].StopAfterClientDisconnect = &stop + + err := job.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "stop_after_client_disconnect can only be set in batch and service jobs") + + // Modify the job to a batch job with an invalid stop_after_client_disconnect value + job.Type = JobTypeBatch + invalid := -1 * time.Minute + job.TaskGroups[0].StopAfterClientDisconnect = &invalid + + err = job.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "stop_after_client_disconnect must be a positive value") + + // Modify the job to a batch job with a valid stop_after_client_disconnect value + job.Type = JobTypeBatch + job.TaskGroups[0].StopAfterClientDisconnect = &stop + err = job.Validate() + require.NoError(t, err) +} + +func TestJobConfig_Validate_MaxClientDisconnect(t *testing.T) { + // Set up a job with an invalid max_client_disconnect value + job := testJob() + timeout := -1 * time.Minute + job.TaskGroups[0].MaxClientDisconnect = &timeout + 
job.TaskGroups[0].StopAfterClientDisconnect = &timeout + + err := job.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), "max_client_disconnect cannot be negative") + require.Contains(t, err.Error(), "Task group cannot be configured with both max_client_disconnect and stop_after_client_disconnect") + + // Modify the job with a valid max_client_disconnect value + timeout = 1 * time.Minute + job.TaskGroups[0].MaxClientDisconnect = &timeout + job.TaskGroups[0].StopAfterClientDisconnect = nil + err = job.Validate() + require.NoError(t, err) +} + func TestParameterizedJobConfig_Canonicalize(t *testing.T) { ci.Parallel(t) diff --git a/nomad/structs/volume_test.go b/nomad/structs/volume_test.go index 02e0715d1a39..fc26702ee9fe 100644 --- a/nomad/structs/volume_test.go +++ b/nomad/structs/volume_test.go @@ -4,7 +4,6 @@ package structs import ( - "errors" "testing" "github.com/hashicorp/nomad/ci" @@ -169,63 +168,3 @@ func TestVolumeMount_Equal(t *testing.T) { Apply: func(vm *VolumeMount) { vm.PropagationMode = "mode2" }, }}) } - -func TestVolumeMount_Validate(t *testing.T) { - ci.Parallel(t) - - testCases := []struct { - name string - expectedErr error - volMount *VolumeMount - }{ - { - name: "valid volume mount", - volMount: &VolumeMount{ - Volume: "vol", - }, - expectedErr: nil, - }, - { - name: "empty volume reference", - volMount: &VolumeMount{ - Volume: "", - }, - expectedErr: errVolMountEmptyVol, - }, - { - name: "invalid propagation mode", - volMount: &VolumeMount{ - Volume: "vol", - PropagationMode: "very invalid propagation mode", - }, - expectedErr: errVolMountInvalidPropagationMode, - }, - { - name: "invalid selinux label", - volMount: &VolumeMount{ - Volume: "vol", - PropagationMode: VolumeMountPropagationPrivate, - SELinuxLabel: "very invalid selinux label", - }, - expectedErr: errVolMountInvalidSELinuxLabel, - }, - { - name: "full valid volume mont", - volMount: &VolumeMount{ - Volume: "vol", - PropagationMode: VolumeMountPropagationPrivate, - SELinuxLabel: SELinuxPrivateVolume, - }, - expectedErr: nil, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - err := tc.volMount.Validate() - if !errors.Is(err, tc.expectedErr) { - t.Fatalf("expected error %v, got %v", tc.expectedErr, err) - } - }) - } -} diff --git a/nomad/structs/volumes.go b/nomad/structs/volumes.go index daacd5d8670b..08e8ae658f9c 100644 --- a/nomad/structs/volumes.go +++ b/nomad/structs/volumes.go @@ -11,20 +11,22 @@ import ( const ( VolumeTypeHost = "host" +) +const ( VolumeMountPropagationPrivate = "private" VolumeMountPropagationHostToTask = "host-to-task" VolumeMountPropagationBidirectional = "bidirectional" - - SELinuxSharedVolume = "z" - SELinuxPrivateVolume = "Z" ) -var ( - errVolMountInvalidPropagationMode = fmt.Errorf("volume mount has an invalid propagation mode") - errVolMountInvalidSELinuxLabel = fmt.Errorf("volume mount has an invalid SELinux label") - errVolMountEmptyVol = fmt.Errorf("volume mount references an empty volume") -) +func MountPropagationModeIsValid(propagationMode string) bool { + switch propagationMode { + case "", VolumeMountPropagationPrivate, VolumeMountPropagationHostToTask, VolumeMountPropagationBidirectional: + return true + default: + return false + } +} // ClientHostVolumeConfig is used to configure access to host paths on a Nomad Client type ClientHostVolumeConfig struct { @@ -248,12 +250,6 @@ type VolumeMount struct { Destination string ReadOnly bool PropagationMode string - SELinuxLabel string -} - -// Hash is a very basic string 
based implementation of a hasher. -func (v *VolumeMount) Hash() string { - return fmt.Sprintf("%#+v", v) } func (v *VolumeMount) Equal(o *VolumeMount) bool { @@ -269,10 +265,7 @@ func (v *VolumeMount) Equal(o *VolumeMount) bool { return false case v.PropagationMode != o.PropagationMode: return false - case v.SELinuxLabel != o.SELinuxLabel: - return false } - return true } @@ -286,43 +279,6 @@ func (v *VolumeMount) Copy() *VolumeMount { return nv } -func (v *VolumeMount) Validate() error { - var mErr *multierror.Error - - // Validate the task does not reference undefined volume mounts - if v.Volume == "" { - mErr = multierror.Append(mErr, errVolMountEmptyVol) - } - - if !v.MountPropagationModeIsValid() { - mErr = multierror.Append(mErr, fmt.Errorf("%w: %q", errVolMountInvalidPropagationMode, v.PropagationMode)) - } - - if !v.SELinuxLabelIsValid() { - mErr = multierror.Append(mErr, fmt.Errorf("%w: \"%s\"", errVolMountInvalidSELinuxLabel, v.SELinuxLabel)) - } - - return mErr.ErrorOrNil() -} - -func (v *VolumeMount) MountPropagationModeIsValid() bool { - switch v.PropagationMode { - case "", VolumeMountPropagationPrivate, VolumeMountPropagationHostToTask, VolumeMountPropagationBidirectional: - return true - default: - return false - } -} - -func (v *VolumeMount) SELinuxLabelIsValid() bool { - switch v.SELinuxLabel { - case "", SELinuxSharedVolume, SELinuxPrivateVolume: - return true - default: - return false - } -} - func CopySliceVolumeMount(s []*VolumeMount) []*VolumeMount { l := len(s) if l == 0 { diff --git a/nomad/system_endpoint_test.go b/nomad/system_endpoint_test.go index c1467adefbce..ecc54bbc4154 100644 --- a/nomad/system_endpoint_test.go +++ b/nomad/system_endpoint_test.go @@ -9,7 +9,7 @@ import ( "testing" memdb "github.com/hashicorp/go-memdb" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/hashicorp/nomad/acl" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/mock" diff --git a/nomad/timetable.go b/nomad/timetable.go index cc99d6f2c14e..215135b22fc2 100644 --- a/nomad/timetable.go +++ b/nomad/timetable.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" ) // TimeTable is used to associate a Raft index with a timestamp. 
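
As a quick sketch of the API shape after the volumes.go hunk above (illustration only: validateMounts and main are hypothetical, while the constants, the VolumeMount fields, and the validity check itself mirror what the patch keeps once the method-form Validate, MountPropagationModeIsValid, and SELinuxLabelIsValid are removed):

package main

import "fmt"

// Mirrors the propagation-mode constants the patch retains in volumes.go.
const (
	VolumeMountPropagationPrivate       = "private"
	VolumeMountPropagationHostToTask    = "host-to-task"
	VolumeMountPropagationBidirectional = "bidirectional"
)

// Mirrors the trimmed VolumeMount struct (SELinuxLabel is gone).
type VolumeMount struct {
	Volume          string
	Destination     string
	ReadOnly        bool
	PropagationMode string
}

// MountPropagationModeIsValid is the package-level check the patch adds;
// the empty string is accepted as the unset/default mode.
func MountPropagationModeIsValid(propagationMode string) bool {
	switch propagationMode {
	case "", VolumeMountPropagationPrivate, VolumeMountPropagationHostToTask, VolumeMountPropagationBidirectional:
		return true
	default:
		return false
	}
}

// validateMounts is a hypothetical caller, standing in for the validation
// that previously lived on *VolumeMount.
func validateMounts(mounts []*VolumeMount) error {
	for _, m := range mounts {
		if m.Volume == "" {
			return fmt.Errorf("volume mount %q references an empty volume", m.Destination)
		}
		if !MountPropagationModeIsValid(m.PropagationMode) {
			return fmt.Errorf("volume mount has an invalid propagation mode %q", m.PropagationMode)
		}
	}
	return nil
}

func main() {
	mounts := []*VolumeMount{{Volume: "vol", Destination: "/srv", PropagationMode: VolumeMountPropagationHostToTask}}
	fmt.Println(validateMounts(mounts)) // <nil>
}
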
diff --git a/nomad/timetable_test.go b/nomad/timetable_test.go index 5396218fab92..6d4c3f0761b9 100644 --- a/nomad/timetable_test.go +++ b/nomad/timetable_test.go @@ -11,7 +11,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" ) diff --git a/nomad/variables_endpoint_test.go b/nomad/variables_endpoint_test.go index 33fd63651758..b3dc05c075d3 100644 --- a/nomad/variables_endpoint_test.go +++ b/nomad/variables_endpoint_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc/v2" + msgpackrpc "github.com/hashicorp/net-rpc-msgpackrpc" "github.com/shoenig/test/must" "github.com/hashicorp/nomad/acl" diff --git a/plugins/base/plugin.go b/plugins/base/plugin.go index 61e232d8d368..b75869944c48 100644 --- a/plugins/base/plugin.go +++ b/plugins/base/plugin.go @@ -8,7 +8,7 @@ import ( "context" "reflect" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" plugin "github.com/hashicorp/go-plugin" "github.com/hashicorp/nomad/plugins/base/proto" "google.golang.org/grpc" diff --git a/plugins/base/plugin_test.go b/plugins/base/plugin_test.go index a8b374c2ebef..840ab116c441 100644 --- a/plugins/base/plugin_test.go +++ b/plugins/base/plugin_test.go @@ -11,13 +11,14 @@ import ( "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/shared/hclspec" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" "github.com/zclconf/go-cty/cty" "github.com/zclconf/go-cty/cty/msgpack" ) func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { ci.Parallel(t) + require := require.New(t) var ( apiVersions = []string{"v0.1.0", "v0.1.1"} @@ -68,20 +69,22 @@ func TestBasePlugin_PluginInfo_GRPC(t *testing.T) { } resp, err := impl.PluginInfo() - must.NoError(t, err) - must.Eq(t, apiVersions, resp.PluginApiVersions) - must.Eq(t, pluginVersion, resp.PluginVersion) - must.Eq(t, pluginName, resp.Name) - must.Eq(t, PluginTypeDriver, resp.Type) + require.NoError(err) + require.Equal(apiVersions, resp.PluginApiVersions) + require.Equal(pluginVersion, resp.PluginVersion) + require.Equal(pluginName, resp.Name) + require.Equal(PluginTypeDriver, resp.Type) // Swap the implementation to return an unknown type mock.PluginInfoF = unknownType _, err = impl.PluginInfo() - must.ErrorContains(t, err, "unknown type") + require.Error(err) + require.Contains(err.Error(), "unknown type") } func TestBasePlugin_ConfigSchema(t *testing.T) { ci.Parallel(t) + require := require.New(t) mock := &MockPlugin{ ConfigSchemaF: func() (*hclspec.Spec, error) { @@ -96,18 +99,23 @@ func TestBasePlugin_ConfigSchema(t *testing.T) { defer client.Close() raw, err := client.Dispense(PluginTypeBase) - must.NoError(t, err) + if err != nil { + t.Fatalf("err: %s", err) + } impl, ok := raw.(BasePlugin) - must.True(t, ok) + if !ok { + t.Fatalf("bad: %#v", raw) + } specOut, err := impl.ConfigSchema() - must.NoError(t, err) - must.True(t, pb.Equal(TestSpec, specOut)) + require.NoError(err) + require.True(pb.Equal(TestSpec, specOut)) } func TestBasePlugin_SetConfig(t *testing.T) { ci.Parallel(t) + require := require.New(t) var receivedData []byte mock := &MockPlugin{ @@ -130,25 +138,29 @@ func TestBasePlugin_SetConfig(t *testing.T) { defer client.Close() raw, err := client.Dispense(PluginTypeBase) - must.NoError(t, err) + if err != nil { + 
t.Fatalf("err: %s", err) + } + impl, ok := raw.(BasePlugin) - must.True(t, ok) + if !ok { + t.Fatalf("bad: %#v", raw) + } config := cty.ObjectVal(map[string]cty.Value{ "foo": cty.StringVal("v1"), "bar": cty.NumberIntVal(1337), "baz": cty.BoolVal(true), }) - cdata, err := msgpack.Marshal(config, config.Type()) - must.NoError(t, err) - must.NoError(t, impl.SetConfig(&Config{PluginConfig: cdata})) - must.Eq(t, cdata, receivedData) + require.NoError(err) + require.NoError(impl.SetConfig(&Config{PluginConfig: cdata})) + require.Equal(cdata, receivedData) // Decode the value back var actual TestConfig - must.NoError(t, structs.Decode(receivedData, &actual)) - must.Eq(t, "v1", actual.Foo) - must.Eq(t, 1337, actual.Bar) - must.True(t, actual.Baz) + require.NoError(structs.Decode(receivedData, &actual)) + require.Equal("v1", actual.Foo) + require.EqualValues(1337, actual.Bar) + require.True(actual.Baz) } diff --git a/plugins/csi/client_test.go b/plugins/csi/client_test.go index 9ce8eb5729c9..45bfb47832b2 100644 --- a/plugins/csi/client_test.go +++ b/plugins/csi/client_test.go @@ -14,6 +14,7 @@ import ( csipbv1 "github.com/container-storage-interface/spec/lib/go/csi" "github.com/golang/protobuf/ptypes/wrappers" "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -101,10 +102,10 @@ func TestClient_RPC_PluginProbe(t *testing.T) { resp, err := client.PluginProbe(context.TODO()) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } @@ -155,11 +156,11 @@ func TestClient_RPC_PluginInfo(t *testing.T) { name, version, err := client.PluginGetInfo(context.TODO()) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponseName, name) - must.Eq(t, tc.ExpectedResponseVersion, version) + require.Equal(t, tc.ExpectedResponseName, name) + require.Equal(t, tc.ExpectedResponseVersion, version) }) } @@ -222,10 +223,10 @@ func TestClient_RPC_PluginGetCapabilities(t *testing.T) { resp, err := client.PluginGetCapabilities(context.TODO()) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } } @@ -322,10 +323,10 @@ func TestClient_RPC_ControllerGetCapabilities(t *testing.T) { resp, err := client.ControllerGetCapabilities(context.TODO()) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } } @@ -382,10 +383,10 @@ func TestClient_RPC_NodeGetCapabilities(t *testing.T) { resp, err := client.NodeGetCapabilities(context.TODO()) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } } @@ -449,10 +450,10 @@ func TestClient_RPC_ControllerPublishVolume(t *testing.T) { resp, err := client.ControllerPublishVolume(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - 
must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } } @@ -497,10 +498,10 @@ func TestClient_RPC_ControllerUnpublishVolume(t *testing.T) { resp, err := client.ControllerUnpublishVolume(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } - must.Eq(t, tc.ExpectedResponse, resp) + require.Equal(t, tc.ExpectedResponse, resp) }) } } @@ -722,9 +723,9 @@ func TestClient_RPC_ControllerValidateVolume(t *testing.T) { err := client.ControllerValidateCapabilities(context.TODO(), req) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err, must.Sprint("name", tc.Name)) + require.NoError(t, err, tc.Name) } }) } @@ -831,24 +832,24 @@ func TestClient_RPC_ControllerCreateVolume(t *testing.T) { resp, err := client.ControllerCreateVolume(context.TODO(), req) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) return } - must.NoError(t, err, must.Sprint("name", tc.Name)) + require.NoError(t, err, tc.Name) if tc.Response == nil { - must.Nil(t, resp) + require.Nil(t, resp) return } if tc.CapacityRange != nil { - must.Greater(t, 0, resp.Volume.CapacityBytes) + require.Greater(t, resp.Volume.CapacityBytes, int64(0)) } if tc.ContentSource != nil { - must.Eq(t, tc.ContentSource.CloneID, resp.Volume.ContentSource.CloneID) - must.Eq(t, tc.ContentSource.SnapshotID, resp.Volume.ContentSource.SnapshotID) + require.Equal(t, tc.ContentSource.CloneID, resp.Volume.ContentSource.CloneID) + require.Equal(t, tc.ContentSource.SnapshotID, resp.Volume.ContentSource.SnapshotID) } if tc.Response != nil && tc.Response.Volume != nil { - must.SliceLen(t, 1, resp.Volume.AccessibleTopology) - must.Eq(t, + require.Len(t, resp.Volume.AccessibleTopology, 1) + require.Equal(t, req.AccessibilityRequirements.Requisite[0].Segments, resp.Volume.AccessibleTopology[0].Segments, ) @@ -893,10 +894,10 @@ func TestClient_RPC_ControllerDeleteVolume(t *testing.T) { cc.NextErr = tc.ResponseErr err := client.ControllerDeleteVolume(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) return } - must.NoError(t, err, must.Sprint("name", tc.Name)) + require.NoError(t, err, tc.Name) }) } } @@ -986,11 +987,11 @@ func TestClient_RPC_ControllerListVolume(t *testing.T) { resp, err := client.ControllerListVolumes(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) return } - must.NoError(t, err, must.Sprint("name", tc.Name)) - must.NotNil(t, resp) + require.NoError(t, err, tc.Name) + require.NotNil(t, resp) }) } @@ -1053,11 +1054,11 @@ func TestClient_RPC_ControllerCreateSnapshot(t *testing.T) { // from protobuf to our struct resp, err := client.ControllerCreateSnapshot(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err, must.Sprint("name", tc.Name)) - must.Positive(t, resp.Snapshot.CreateTime) - must.Eq(t, now.Second(), time.Unix(resp.Snapshot.CreateTime, 0).Second()) + require.NoError(t, err, tc.Name) + require.NotZero(t, resp.Snapshot.CreateTime) + require.Equal(t, now.Second(), time.Unix(resp.Snapshot.CreateTime, 
0).Second()) } }) } @@ -1098,10 +1099,10 @@ func TestClient_RPC_ControllerDeleteSnapshot(t *testing.T) { cc.NextErr = tc.ResponseErr err := client.ControllerDeleteSnapshot(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) return } - must.NoError(t, err, must.Sprint("name", tc.Name)) + require.NoError(t, err, tc.Name) }) } } @@ -1161,14 +1162,14 @@ func TestClient_RPC_ControllerListSnapshots(t *testing.T) { resp, err := client.ControllerListSnapshots(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) return } - must.NoError(t, err, must.Sprint("name", tc.Name)) - must.NotNil(t, resp) - must.Len(t, 1, resp.Entries) - must.Positive(t, resp.Entries[0].Snapshot.CreateTime) - must.Eq(t, now.Second(), + require.NoError(t, err, tc.Name) + require.NotNil(t, resp) + require.Len(t, resp.Entries, 1) + require.NotZero(t, resp.Entries[0].Snapshot.CreateTime) + require.Equal(t, now.Second(), time.Unix(resp.Entries[0].Snapshot.CreateTime, 0).Second()) }) } @@ -1358,9 +1359,9 @@ func TestClient_RPC_NodeStageVolume(t *testing.T) { VolumeCapability: &VolumeCapability{}, }) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err) + require.Nil(t, err) } }) } @@ -1397,9 +1398,9 @@ func TestClient_RPC_NodeUnstageVolume(t *testing.T) { err := client.NodeUnstageVolume(context.TODO(), "foo", "/foo") if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err) + require.Nil(t, err) } }) } @@ -1455,9 +1456,9 @@ func TestClient_RPC_NodePublishVolume(t *testing.T) { err := client.NodePublishVolume(context.TODO(), tc.Request) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err) + require.Nil(t, err) } }) } @@ -1510,9 +1511,9 @@ func TestClient_RPC_NodeUnpublishVolume(t *testing.T) { err := client.NodeUnpublishVolume(context.TODO(), tc.ExternalID, tc.TargetPath) if tc.ExpectedErr != nil { - must.EqError(t, err, tc.ExpectedErr.Error()) + require.EqualError(t, err, tc.ExpectedErr.Error()) } else { - must.NoError(t, err) + require.Nil(t, err) } }) } diff --git a/plugins/drivers/client.go b/plugins/drivers/client.go index 3c27df330037..c5447dc7c695 100644 --- a/plugins/drivers/client.go +++ b/plugins/drivers/client.go @@ -16,7 +16,6 @@ import ( "github.com/hashicorp/nomad/helper/pluginutils/grpcutils" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/drivers/proto" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -67,21 +66,18 @@ func (d *driverPluginClient) Capabilities() (*Capabilities, error) { switch resp.Capabilities.FsIsolation { case proto.DriverCapabilities_NONE: - caps.FSIsolation = fsisolation.None + caps.FSIsolation = FSIsolationNone case proto.DriverCapabilities_CHROOT: - caps.FSIsolation = fsisolation.Chroot + caps.FSIsolation = FSIsolationChroot case proto.DriverCapabilities_IMAGE: - caps.FSIsolation = fsisolation.Image - case proto.DriverCapabilities_UNVEIL: - caps.FSIsolation = fsisolation.Unveil + 
caps.FSIsolation = FSIsolationImage default: - caps.FSIsolation = fsisolation.None + caps.FSIsolation = FSIsolationNone } caps.MountConfigs = MountConfigSupport(resp.Capabilities.MountConfigs) caps.RemoteTasks = resp.Capabilities.RemoteTasks caps.DisableLogCollection = resp.Capabilities.DisableLogCollection - caps.DynamicWorkloadUsers = resp.Capabilities.DynamicWorkloadUsers } return caps, nil diff --git a/plugins/drivers/driver.go b/plugins/drivers/driver.go index cbbf3a4cbae3..6fcd78fb9923 100644 --- a/plugins/drivers/driver.go +++ b/plugins/drivers/driver.go @@ -17,7 +17,6 @@ import ( cstructs "github.com/hashicorp/nomad/client/structs" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/base" - "github.com/hashicorp/nomad/plugins/drivers/fsisolation" "github.com/hashicorp/nomad/plugins/drivers/proto" "github.com/hashicorp/nomad/plugins/shared/hclspec" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" @@ -133,18 +132,20 @@ type Fingerprint struct { Err error } -// Deprecated: use fsisolation.Mode instead. -type FSIsolation = fsisolation.Mode +// FSIsolation is an enumeration to describe what kind of filesystem isolation +// a driver supports. +type FSIsolation string var ( - // Deprecated: use fsisolation.None instead. - FSIsolationNone = fsisolation.None + // FSIsolationNone means no isolation. The host filesystem is used. + FSIsolationNone = FSIsolation("none") - // Deprecated: use fsisolation.Chroot instead. - FSIsolationChroot = fsisolation.Chroot + // FSIsolationChroot means the driver will use a chroot on the host + // filesystem. + FSIsolationChroot = FSIsolation("chroot") - // Deprecated: use fsisolation.Image instead. - FSIsolationImage = fsisolation.Image + // FSIsolationImage means the driver uses an image. + FSIsolationImage = FSIsolation("image") ) type Capabilities struct { @@ -156,7 +157,7 @@ type Capabilities struct { Exec bool //FSIsolation indicates what kind of filesystem isolation the driver supports. - FSIsolation fsisolation.Mode + FSIsolation FSIsolation //NetIsolationModes lists the set of isolation modes supported by the driver NetIsolationModes []NetIsolationMode @@ -177,12 +178,6 @@ type Capabilities struct { // DisableLogCollection indicates this driver has disabled log collection // and the client should not start a logmon process. DisableLogCollection bool - - // DynamicWorkloadUsers indicates this driver is capable (but not required) - // of making use of UID/GID not backed by a user known to the operating system. - // The allocation of a unique, not-in-use UID/GID is managed by Nomad client - // ensuring no overlap. - DynamicWorkloadUsers bool } func (c *Capabilities) HasNetIsolationMode(m NetIsolationMode) bool { @@ -449,15 +444,13 @@ type MountConfig struct { HostPath string Readonly bool PropagationMode string - SELinuxLabel string } func (m *MountConfig) IsEqual(o *MountConfig) bool { return m.TaskPath == o.TaskPath && m.HostPath == o.HostPath && m.Readonly == o.Readonly && - m.PropagationMode == o.PropagationMode && - m.SELinuxLabel == o.SELinuxLabel + m.PropagationMode == o.PropagationMode } func (m *MountConfig) Copy() *MountConfig { diff --git a/plugins/drivers/fsisolation/isolation.go b/plugins/drivers/fsisolation/isolation.go deleted file mode 100644 index 417e657595bd..000000000000 --- a/plugins/drivers/fsisolation/isolation.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright (c) HashiCorp, Inc. 
-// SPDX-License-Identifier: MPL-2.0 - -package fsisolation - -// Mode is an enum to describe what kind of filesystem isolation a -// driver supports. -type Mode string - -const ( - // IsolationNone means no isolation. The host filesystem is used. - None = Mode("none") - - // IsolationChroot means the driver will use a chroot on the host - // filesystem. - Chroot = Mode("chroot") - - // IsolationImage means the driver uses an image. - Image = Mode("image") - - // IsolationUnveil means the driver and client will work together using - // unveil() syscall semantics (i.e. landlock on linux) isolate the host - // filesytem from workloads. - Unveil = Mode("unveil") -) diff --git a/plugins/drivers/proto/driver.pb.go b/plugins/drivers/proto/driver.pb.go index f346f4b304a5..6cee0af890d8 100644 --- a/plugins/drivers/proto/driver.pb.go +++ b/plugins/drivers/proto/driver.pb.go @@ -118,21 +118,18 @@ const ( DriverCapabilities_NONE DriverCapabilities_FSIsolation = 0 DriverCapabilities_CHROOT DriverCapabilities_FSIsolation = 1 DriverCapabilities_IMAGE DriverCapabilities_FSIsolation = 2 - DriverCapabilities_UNVEIL DriverCapabilities_FSIsolation = 3 ) var DriverCapabilities_FSIsolation_name = map[int32]string{ 0: "NONE", 1: "CHROOT", 2: "IMAGE", - 3: "UNVEIL", } var DriverCapabilities_FSIsolation_value = map[string]int32{ "NONE": 0, "CHROOT": 1, "IMAGE": 2, - "UNVEIL": 3, } func (x DriverCapabilities_FSIsolation) String() string { @@ -1854,10 +1851,7 @@ type DriverCapabilities struct { RemoteTasks bool `protobuf:"varint,7,opt,name=remote_tasks,json=remoteTasks,proto3" json:"remote_tasks,omitempty"` // disable_log_collection indicates whether the driver has the capability of // disabling log collection - DisableLogCollection bool `protobuf:"varint,8,opt,name=disable_log_collection,json=disableLogCollection,proto3" json:"disable_log_collection,omitempty"` - // dynamic_workload_users indicates the task is capable of using UID/GID - // assigned from the Nomad client as user credentials for the task. - DynamicWorkloadUsers bool `protobuf:"varint,9,opt,name=dynamic_workload_users,json=dynamicWorkloadUsers,proto3" json:"dynamic_workload_users,omitempty"` + DisableLogCollection bool `protobuf:"varint,8,opt,name=disable_log_collection,json=disableLogCollection,proto3" json:"disable_log_collection,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1944,13 +1938,6 @@ func (m *DriverCapabilities) GetDisableLogCollection() bool { return false } -func (m *DriverCapabilities) GetDynamicWorkloadUsers() bool { - if m != nil { - return m.DynamicWorkloadUsers - } - return false -} - type NetworkIsolationSpec struct { Mode NetworkIsolationSpec_NetworkIsolationMode `protobuf:"varint,1,opt,name=mode,proto3,enum=hashicorp.nomad.plugins.drivers.proto.NetworkIsolationSpec_NetworkIsolationMode" json:"mode,omitempty"` Path string `protobuf:"bytes,2,opt,name=path,proto3" json:"path,omitempty"` @@ -2855,7 +2842,6 @@ type Mount struct { // Propagation mode for the mount. Not exactly the same as the unix mount // propagation flags. See callsite usage for details. 
PropagationMode string `protobuf:"bytes,4,opt,name=propagation_mode,json=propagationMode,proto3" json:"propagation_mode,omitempty"` - SelinuxLabel string `protobuf:"bytes,5,opt,name=selinux_label,json=selinuxLabel,proto3" json:"selinux_label,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -2914,13 +2900,6 @@ func (m *Mount) GetPropagationMode() string { return "" } -func (m *Mount) GetSelinuxLabel() string { - if m != nil { - return m.SelinuxLabel - } - return "" -} - type Device struct { // TaskPath is the file path within the task to mount the device to TaskPath string `protobuf:"bytes,1,opt,name=task_path,json=taskPath,proto3" json:"task_path,omitempty"` @@ -3768,254 +3747,251 @@ func init() { } var fileDescriptor_4a8f45747846a74d = []byte{ - // 3945 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0x4f, 0x73, 0xdb, 0x48, - 0x76, 0x17, 0x08, 0x92, 0x22, 0x1f, 0x25, 0x0a, 0x6a, 0x49, 0x36, 0xcd, 0xd9, 0x64, 0xbc, 0xd8, - 0x9a, 0x94, 0xb2, 0x3b, 0x43, 0xcf, 0x6a, 0x93, 0xf1, 0xd8, 0xeb, 0x59, 0x0f, 0x87, 0xa2, 0x2d, - 0xda, 0x12, 0xa5, 0x34, 0xa9, 0x78, 0x1d, 0x27, 0x83, 0x40, 0x40, 0x9b, 0x82, 0x45, 0x02, 0x18, - 0x34, 0x28, 0x4b, 0x9b, 0x4a, 0x25, 0xb5, 0xa9, 0x4a, 0x6d, 0xaa, 0x92, 0x4a, 0x2e, 0x93, 0xbd, - 0xe4, 0xb4, 0x55, 0x39, 0xa5, 0x72, 0x4f, 0x6d, 0x6a, 0x4e, 0x39, 0xe4, 0x4b, 0xe4, 0x92, 0x5b, - 0x6e, 0xa9, 0x7c, 0x82, 0x6c, 0xf5, 0x1f, 0x80, 0x80, 0x48, 0x8f, 0x41, 0xca, 0x27, 0xf2, 0xbd, - 0xee, 0xfe, 0xf5, 0xc3, 0x7b, 0xaf, 0x5f, 0xbf, 0xee, 0x7e, 0xa0, 0xfb, 0xc3, 0xf1, 0xc0, 0x71, - 0xe9, 0x1d, 0x3b, 0x70, 0xce, 0x49, 0x40, 0xef, 0xf8, 0x81, 0x17, 0x7a, 0x92, 0x6a, 0x70, 0x02, - 0x7d, 0x70, 0x6a, 0xd2, 0x53, 0xc7, 0xf2, 0x02, 0xbf, 0xe1, 0x7a, 0x23, 0xd3, 0x6e, 0xc8, 0x31, - 0x0d, 0x39, 0x46, 0x74, 0xab, 0xff, 0xf6, 0xc0, 0xf3, 0x06, 0x43, 0x22, 0x10, 0x4e, 0xc6, 0x2f, - 0xef, 0xd8, 0xe3, 0xc0, 0x0c, 0x1d, 0xcf, 0x95, 0xed, 0xef, 0x5f, 0x6d, 0x0f, 0x9d, 0x11, 0xa1, - 0xa1, 0x39, 0xf2, 0x65, 0x87, 0x0f, 0x22, 0x59, 0xe8, 0xa9, 0x19, 0x10, 0xfb, 0xce, 0xa9, 0x35, - 0xa4, 0x3e, 0xb1, 0xd8, 0xaf, 0xc1, 0xfe, 0xc8, 0x6e, 0x1f, 0x5e, 0xe9, 0x46, 0xc3, 0x60, 0x6c, - 0x85, 0x91, 0xe4, 0x66, 0x18, 0x06, 0xce, 0xc9, 0x38, 0x24, 0xa2, 0xb7, 0x7e, 0x0b, 0x6e, 0xf6, - 0x4d, 0x7a, 0xd6, 0xf2, 0xdc, 0x97, 0xce, 0xa0, 0x67, 0x9d, 0x92, 0x91, 0x89, 0xc9, 0x57, 0x63, - 0x42, 0x43, 0xfd, 0x8f, 0xa1, 0x36, 0xdd, 0x44, 0x7d, 0xcf, 0xa5, 0x04, 0x7d, 0x0e, 0x79, 0x36, - 0x65, 0x4d, 0xb9, 0xad, 0x6c, 0x57, 0x76, 0x3e, 0x6c, 0xbc, 0x49, 0x05, 0x42, 0x86, 0x86, 0x14, - 0xb5, 0xd1, 0xf3, 0x89, 0x85, 0xf9, 0x48, 0x7d, 0x0b, 0x36, 0x5a, 0xa6, 0x6f, 0x9e, 0x38, 0x43, - 0x27, 0x74, 0x08, 0x8d, 0x26, 0x1d, 0xc3, 0x66, 0x9a, 0x2d, 0x27, 0xfc, 0x13, 0x58, 0xb1, 0x12, - 0x7c, 0x39, 0xf1, 0xbd, 0x46, 0x26, 0xdd, 0x37, 0x76, 0x39, 0x95, 0x02, 0x4e, 0xc1, 0xe9, 0x9b, - 0x80, 0x1e, 0x39, 0xee, 0x80, 0x04, 0x7e, 0xe0, 0xb8, 0x61, 0x24, 0xcc, 0x37, 0x2a, 0x6c, 0xa4, - 0xd8, 0x52, 0x98, 0x57, 0x00, 0xb1, 0x1e, 0x99, 0x28, 0xea, 0x76, 0x65, 0xe7, 0x49, 0x46, 0x51, - 0x66, 0xe0, 0x35, 0x9a, 0x31, 0x58, 0xdb, 0x0d, 0x83, 0x4b, 0x9c, 0x40, 0x47, 0x5f, 0x42, 0xf1, - 0x94, 0x98, 0xc3, 0xf0, 0xb4, 0x96, 0xbb, 0xad, 0x6c, 0x57, 0x77, 0x1e, 0x5d, 0x63, 0x9e, 0x3d, - 0x0e, 0xd4, 0x0b, 0xcd, 0x90, 0x60, 0x89, 0x8a, 0x3e, 0x02, 0x24, 0xfe, 0x19, 0x36, 0xa1, 0x56, - 0xe0, 0xf8, 0xcc, 0x25, 0x6b, 0xea, 0x6d, 0x65, 0xbb, 0x8c, 0xd7, 0x45, 0xcb, 0xee, 0xa4, 0xa1, - 0xee, 0xc3, 0xda, 0x15, 0x69, 
-	[remaining rows of the old 3945-byte gzipped FileDescriptorProto literal elided]
+	// 3895 bytes of a gzipped FileDescriptorProto
+	[rows of the regenerated 3895-byte gzipped FileDescriptorProto literal elided; the descriptor shrinks because the UNVEIL isolation mode, the dynamic_workload_users capability, and the Mount selinux_label field are removed above]
0x2b, 0xe2, 0x0d, 0x29, 0xd3, 0xb1, 0xeb, 0x5d, 0x99, 0xfa, 0x3e, 0x00, 0x0d, 0x8d, 0x80, 0xe5, + 0x57, 0x46, 0x74, 0xe5, 0xd9, 0x98, 0x79, 0xba, 0x18, 0x44, 0x45, 0x2d, 0xb8, 0x22, 0x7b, 0xb7, + 0x42, 0xf4, 0x39, 0xac, 0x99, 0x9e, 0xe3, 0x8f, 0x88, 0x1c, 0x5c, 0x7c, 0xeb, 0xe0, 0x6a, 0xdc, + 0xbf, 0x15, 0x26, 0x2e, 0x5a, 0x4b, 0xd7, 0xbd, 0x68, 0xfd, 0xb5, 0x22, 0x9e, 0xc2, 0x92, 0x2f, + 0x71, 0x68, 0x38, 0xa7, 0xdc, 0xe3, 0xf1, 0x92, 0xcf, 0x7a, 0xdf, 0x55, 0xeb, 0xd1, 0xf8, 0x3c, + 0x4b, 0x71, 0xc5, 0x9b, 0x33, 0xde, 0x7f, 0xcf, 0x43, 0x25, 0x7e, 0x05, 0x9b, 0xb1, 0xfd, 0xa7, + 0x50, 0x89, 0x2b, 0x8a, 0x64, 0x80, 0xf8, 0x4e, 0xf3, 0xc4, 0x9d, 0xd1, 0x4b, 0x40, 0xc6, 0x70, + 0x18, 0x67, 0xb2, 0xfa, 0x98, 0x1a, 0xc3, 0xe8, 0x0d, 0xf2, 0xd3, 0x05, 0xf4, 0x10, 0x6d, 0x7d, + 0x27, 0x6c, 0x3c, 0x56, 0x8d, 0xe1, 0x30, 0xc5, 0x41, 0x7f, 0x06, 0xdb, 0xe9, 0x39, 0xf4, 0xd3, + 0x2b, 0xdd, 0xb7, 0x2d, 0x79, 0xbc, 0xdf, 0x5f, 0xf4, 0x21, 0xb0, 0x99, 0x82, 0xff, 0xf2, 0xea, + 0xd8, 0xb6, 0x84, 0xce, 0x51, 0x30, 0xd3, 0xd0, 0xf8, 0x0b, 0xb8, 0xf5, 0x86, 0xee, 0x73, 0x6c, + 0xd0, 0x4b, 0x17, 0xb8, 0x2c, 0xaf, 0x84, 0x84, 0xf5, 0x7e, 0xa5, 0x88, 0xf7, 0xca, 0xb4, 0x4e, + 0x5a, 0xc9, 0x14, 0xfc, 0x6e, 0xc6, 0x79, 0xda, 0xc7, 0x27, 0x02, 0x9e, 0x67, 0xdd, 0x4f, 0xa6, + 0xb2, 0xee, 0xac, 0xb9, 0x96, 0x48, 0x5e, 0x05, 0x90, 0x44, 0xd0, 0xfe, 0x25, 0x0f, 0xe5, 0x08, + 0x9d, 0x1f, 0xce, 0xaf, 0x68, 0x48, 0x1c, 0x3d, 0xbe, 0x39, 0x54, 0x30, 0x08, 0x16, 0xbf, 0xcf, + 0x7a, 0x0f, 0x2a, 0x63, 0x4a, 0x02, 0xd1, 0x9c, 0xe3, 0xcd, 0x65, 0xc6, 0xe0, 0x8d, 0xef, 0x43, + 0x35, 0xf4, 0x42, 0x63, 0xa4, 0x87, 0x3c, 0x15, 0xc8, 0x8b, 0xd1, 0x9c, 0xc5, 0x13, 0x01, 0xf4, + 0x43, 0xd8, 0x0c, 0xcf, 0x02, 0x2f, 0x0c, 0x47, 0x2c, 0x0d, 0xe5, 0x49, 0x91, 0xc8, 0x61, 0x0a, + 0x58, 0x8d, 0x1b, 0x44, 0xb2, 0x44, 0x59, 0xf4, 0x9e, 0x74, 0x66, 0xae, 0xcb, 0x83, 0x48, 0x01, + 0xaf, 0xc7, 0x5c, 0xe6, 0xda, 0x6c, 0xf3, 0xf4, 0x45, 0xb2, 0xc1, 0x63, 0x85, 0x82, 0x23, 0x12, + 0xe9, 0xb0, 0xe1, 0x10, 0x83, 0x8e, 0x03, 0x62, 0xe9, 0x2f, 0x6d, 0x32, 0xb2, 0xc4, 0x9d, 0x4a, + 0x2d, 0xf3, 0x49, 0x22, 0x52, 0x4b, 0xf3, 0x11, 0x1f, 0x8d, 0x6b, 0x11, 0x9c, 0xa0, 0x59, 0xe6, + 0x20, 0xbe, 0xd0, 0x06, 0x54, 0xfb, 0xcf, 0xfb, 0x83, 0xce, 0xa1, 0x7e, 0x78, 0xb4, 0xd7, 0x91, + 0x35, 0x4c, 0xfd, 0x0e, 0x16, 0xa4, 0xc2, 0xda, 0x07, 0x47, 0x83, 0xd6, 0x81, 0x3e, 0xe8, 0xb6, + 0x9f, 0xf6, 0xd5, 0x1c, 0xda, 0x86, 0xcd, 0xc1, 0x3e, 0x3e, 0x1a, 0x0c, 0x0e, 0x3a, 0x7b, 0xfa, + 0x71, 0x07, 0x77, 0x8f, 0xf6, 0xfa, 0x6a, 0x1e, 0x21, 0xa8, 0x4d, 0xd8, 0x83, 0xee, 0x61, 0x47, + 0x2d, 0xa0, 0x2a, 0xac, 0x1e, 0x77, 0x70, 0xbb, 0xd3, 0x1b, 0xa8, 0x45, 0xed, 0x97, 0x79, 0xa8, + 0x26, 0xac, 0xc8, 0x1c, 0x39, 0xa0, 0xe2, 0xc8, 0x52, 0xc0, 0xec, 0x93, 0xbf, 0xb9, 0x1a, 0xe6, + 0x99, 0xb0, 0x4e, 0x01, 0x0b, 0x82, 0x1f, 0x53, 0x8c, 0xcb, 0xc4, 0x3a, 0x2f, 0xe0, 0xb2, 0x63, + 0x5c, 0x0a, 0x90, 0xef, 0xc3, 0xda, 0x39, 0x09, 0x5c, 0x32, 0x92, 0xed, 0xc2, 0x22, 0x55, 0xc1, + 0x13, 0x5d, 0x76, 0x40, 0x95, 0x5d, 0x26, 0x30, 0xc2, 0x1c, 0x35, 0xc1, 0x3f, 0x8c, 0xc0, 0xb6, + 0xa0, 0x28, 0x9a, 0x57, 0xc5, 0xfc, 0x9c, 0x60, 0xdb, 0x14, 0x7d, 0x6d, 0xf8, 0x3c, 0x3d, 0x2c, + 0x60, 0xfe, 0x8d, 0x4e, 0x67, 0xed, 0x53, 0xe2, 0xf6, 0xb9, 0xbf, 0xb8, 0x3b, 0xbf, 0xc9, 0x44, + 0x67, 0xb1, 0x89, 0x56, 0x21, 0x8f, 0xa3, 0xc2, 0x9f, 0x76, 0xab, 0xbd, 0xcf, 0xcc, 0xb2, 0x0e, + 0x95, 0xc3, 0xd6, 0xcf, 0xf4, 0x93, 0x3e, 0xbf, 0x90, 0x47, 0x2a, 0xac, 0x3d, 0xed, 0xe0, 0x5e, + 0xe7, 0x40, 0x72, 0xf2, 0x68, 0x0b, 0x54, 0xc9, 0x99, 0xf4, 0x2b, 0x30, 0x04, 0xf1, 0x59, 0x44, + 0x65, 0x28, 0xf4, 0x9f, 
0xb5, 0x8e, 0xd5, 0x92, 0xf6, 0xdf, 0x39, 0xd8, 0x10, 0xdb, 0x42, 0x5c, + 0xa2, 0xf0, 0xe6, 0x27, 0xda, 0xe4, 0x05, 0x55, 0x2e, 0x7d, 0x41, 0x15, 0x25, 0xa1, 0x7c, 0x57, + 0xcf, 0x4f, 0x92, 0x50, 0x7e, 0x69, 0x93, 0x8a, 0xf8, 0x85, 0x45, 0x22, 0x7e, 0x1d, 0x56, 0x1d, + 0x42, 0x63, 0xbb, 0x55, 0x70, 0x44, 0x22, 0x1b, 0xaa, 0x86, 0xeb, 0x7a, 0xa1, 0x21, 0x6e, 0x7d, + 0x4b, 0x0b, 0x6d, 0x86, 0x53, 0xff, 0xb8, 0xd9, 0x9a, 0x20, 0x89, 0xc0, 0x9c, 0xc4, 0x6e, 0xfc, + 0x14, 0xd4, 0xe9, 0x0e, 0x8b, 0x6c, 0x87, 0x3f, 0xf8, 0xd1, 0x64, 0x37, 0x24, 0x6c, 0x5d, 0xc8, + 0xe7, 0x12, 0x75, 0x85, 0x11, 0xf8, 0xa4, 0xd7, 0xeb, 0xf6, 0x1e, 0xab, 0x0a, 0x02, 0x28, 0x75, + 0x7e, 0xd6, 0x1d, 0x74, 0xf6, 0xd4, 0xdc, 0xee, 0xaf, 0x36, 0xa1, 0x24, 0x84, 0x44, 0xdf, 0xca, + 0x4c, 0x20, 0x59, 0xfe, 0x8a, 0x7e, 0xba, 0x70, 0x46, 0x9d, 0x2a, 0xa9, 0x6d, 0x3c, 0x5c, 0x7a, + 0xbc, 0x7c, 0x6e, 0x5c, 0x41, 0x7f, 0xa3, 0xc0, 0x5a, 0xea, 0xa9, 0x31, 0xeb, 0xad, 0xf7, 0x9c, + 0x6a, 0xdb, 0xc6, 0x4f, 0x96, 0x1a, 0x1b, 0xcb, 0xf2, 0x8d, 0x02, 0xd5, 0x44, 0x9d, 0x29, 0xba, + 0xbf, 0x4c, 0x6d, 0xaa, 0x90, 0xe4, 0xb3, 0xe5, 0xcb, 0x5a, 0xb5, 0x95, 0x8f, 0x15, 0xf4, 0xd7, + 0x0a, 0x54, 0x13, 0x15, 0x97, 0x99, 0x45, 0x99, 0xad, 0x0f, 0xcd, 0x2c, 0xca, 0xbc, 0x02, 0xcf, + 0x15, 0xf4, 0x97, 0x0a, 0x54, 0xe2, 0xea, 0x49, 0x74, 0x6f, 0xf1, 0x7a, 0x4b, 0x21, 0xc4, 0xa7, + 0xcb, 0x16, 0x6a, 0x6a, 0x2b, 0xe8, 0xcf, 0xa1, 0x1c, 0x95, 0x1a, 0xa2, 0xac, 0xbb, 0xd7, 0x54, + 0x1d, 0x63, 0xe3, 0xde, 0xc2, 0xe3, 0x92, 0xd3, 0x47, 0xf5, 0x7f, 0x99, 0xa7, 0x9f, 0xaa, 0x54, + 0x6c, 0xdc, 0x5b, 0x78, 0x5c, 0x3c, 0x3d, 0xf3, 0x84, 0x44, 0x99, 0x60, 0x66, 0x4f, 0x98, 0xad, + 0x4f, 0xcc, 0xec, 0x09, 0xf3, 0xaa, 0x12, 0x85, 0x20, 0x89, 0x42, 0xc3, 0xcc, 0x82, 0xcc, 0x16, + 0x33, 0x66, 0x16, 0x64, 0x4e, 0x5d, 0xa3, 0xb6, 0x82, 0x7e, 0xa1, 0x24, 0xcf, 0x05, 0xf7, 0x16, + 0xae, 0xa7, 0x5b, 0xd0, 0x25, 0x67, 0x2a, 0xfa, 0xf8, 0x02, 0xfd, 0x85, 0xbc, 0xc5, 0x10, 0xe5, + 0x78, 0x68, 0x11, 0xb0, 0x54, 0x05, 0x5f, 0xe3, 0x93, 0xe5, 0x36, 0x1b, 0x2e, 0xc4, 0x5f, 0x29, + 0x00, 0x93, 0xc2, 0xbd, 0xcc, 0x42, 0xcc, 0x54, 0x0c, 0x36, 0xee, 0x2f, 0x31, 0x32, 0xb9, 0x40, + 0xa2, 0xc2, 0xa2, 0xcc, 0x0b, 0x64, 0xaa, 0xb0, 0x30, 0xf3, 0x02, 0x99, 0x2e, 0x0a, 0xd4, 0x56, + 0xd0, 0x3f, 0x29, 0xb0, 0x39, 0x53, 0xd8, 0x84, 0x1e, 0x5e, 0xb3, 0xb6, 0xad, 0xf1, 0xc5, 0xf2, + 0x00, 0x91, 0x68, 0x3b, 0xca, 0xc7, 0x0a, 0xfa, 0x5b, 0x05, 0xd6, 0xd3, 0x05, 0x1f, 0x99, 0x77, + 0xa9, 0x39, 0x25, 0x52, 0x8d, 0x07, 0xcb, 0x0d, 0x8e, 0xb5, 0xf5, 0xf7, 0x0a, 0xd4, 0xd2, 0xb5, + 0x3f, 0xe8, 0xc1, 0x62, 0x61, 0x61, 0x4a, 0xa0, 0xcf, 0x97, 0x1c, 0x1d, 0x49, 0xf4, 0xe5, 0xea, + 0x1f, 0x15, 0x45, 0xf6, 0x56, 0xe2, 0x3f, 0x3f, 0xfe, 0xff, 0x00, 0x00, 0x00, 0xff, 0xff, 0x73, + 0x96, 0x70, 0x25, 0xa5, 0x34, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. diff --git a/plugins/drivers/proto/driver.proto b/plugins/drivers/proto/driver.proto index b49fcc3833b8..98e845c3ed94 100644 --- a/plugins/drivers/proto/driver.proto +++ b/plugins/drivers/proto/driver.proto @@ -371,7 +371,6 @@ message DriverCapabilities { NONE = 0; CHROOT = 1; IMAGE = 2; - UNVEIL= 3; } // FsIsolation indicates what kind of filesystem isolation a driver supports. 
   FSIsolation fs_isolation = 3;
@@ -397,10 +396,6 @@ message DriverCapabilities {
   // disable_log_collection indicates whether the driver has the capability of
   // disabling log collection
   bool disable_log_collection = 8;
-
-  // dynamic_workload_users indicates the task is capable of using UID/GID
-  // assigned from the Nomad client as user credentials for the task.
-  bool dynamic_workload_users = 9;
 }
 
 message NetworkIsolationSpec {
@@ -595,8 +590,6 @@ message Mount {
   // Propagation mode for the mount. Not exactly the same as the unix mount
   // propagation flags. See callsite usage for details.
   string propagation_mode = 4;
-
-  string selinux_label = 5;
 }
 
 message Device {
diff --git a/plugins/drivers/server.go b/plugins/drivers/server.go
index 36a9d96dc5bd..cfd2476871a5 100644
--- a/plugins/drivers/server.go
+++ b/plugins/drivers/server.go
@@ -15,7 +15,6 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/hashicorp/nomad/nomad/structs"
-	"github.com/hashicorp/nomad/plugins/drivers/fsisolation"
 	"github.com/hashicorp/nomad/plugins/drivers/proto"
 	dstructs "github.com/hashicorp/nomad/plugins/shared/structs"
 	sproto "github.com/hashicorp/nomad/plugins/shared/structs/proto"
@@ -50,19 +49,16 @@ func (b *driverPluginServer) Capabilities(ctx context.Context, req *proto.Capabi
 			MustCreateNetwork:     caps.MustInitiateNetwork,
 			NetworkIsolationModes: []proto.NetworkIsolationSpec_NetworkIsolationMode{},
 			RemoteTasks:           caps.RemoteTasks,
-			DynamicWorkloadUsers:  caps.DynamicWorkloadUsers,
 		},
 	}
 
 	switch caps.FSIsolation {
-	case fsisolation.None:
+	case FSIsolationNone:
 		resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE
-	case fsisolation.Chroot:
+	case FSIsolationChroot:
 		resp.Capabilities.FsIsolation = proto.DriverCapabilities_CHROOT
-	case fsisolation.Image:
+	case FSIsolationImage:
 		resp.Capabilities.FsIsolation = proto.DriverCapabilities_IMAGE
-	case fsisolation.Unveil:
-		resp.Capabilities.FsIsolation = proto.DriverCapabilities_UNVEIL
 	default:
 		resp.Capabilities.FsIsolation = proto.DriverCapabilities_NONE
 	}
diff --git a/plugins/drivers/testutils/exec_testing.go b/plugins/drivers/testutils/exec_testing.go
index c0eeebd8de57..309a186fd203 100644
--- a/plugins/drivers/testutils/exec_testing.go
+++ b/plugins/drivers/testutils/exec_testing.go
@@ -8,6 +8,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"reflect"
 	"regexp"
 	"runtime"
 	"strings"
@@ -19,7 +20,7 @@ import (
 	"github.com/hashicorp/nomad/plugins/drivers"
 	dproto "github.com/hashicorp/nomad/plugins/drivers/proto"
 	"github.com/hashicorp/nomad/testutil"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func ExecTaskStreamingConformanceTests(t *testing.T, driver *DriverHarness, taskID string) {
@@ -120,29 +121,30 @@ func TestExecTaskStreamingBasicResponses(t *testing.T, driver *DriverHarness, ta
 			result := execTask(t, driver, taskID, c.Command, c.Tty, c.Stdin)
 
-			must.Eq(t, c.ExitCode, result.exitCode)
+			require.Equal(t, c.ExitCode, result.exitCode)
 
 			switch s := c.Stdout.(type) {
 			case string:
-				must.Eq(t, s, result.stdout)
+				require.Equal(t, s, result.stdout)
 			case *regexp.Regexp:
-				must.RegexMatch(t, s, result.stdout)
+				require.Regexp(t, s, result.stdout)
 			case nil:
-				must.Eq(t, "", result.stdout)
+				require.Empty(t, result.stdout)
 			default:
-				t.Fatal("unexpected type")
+				require.Fail(t, "unexpected stdout type", "found %v (%v), but expected string or regexp", s, reflect.TypeOf(s))
 			}
 
 			switch s := c.Stderr.(type) {
 			case string:
-				must.Eq(t, s, result.stderr)
+				require.Equal(t, s, result.stderr)
 			case *regexp.Regexp:
-				must.RegexMatch(t, s, result.stderr)
+				require.Regexp(t, s, result.stderr)
 			case nil:
-				must.Eq(t, "", result.stderr)
+				require.Empty(t, result.stderr)
 			default:
-				t.Fatal("unexpected type")
+				require.Fail(t, "unexpected stderr type", "found %v (%v), but expected string or regexp", s, reflect.TypeOf(s))
 			}
+		})
 	}
 }
@@ -152,7 +154,7 @@ func TestExecTaskStreamingBasicResponses(t *testing.T, driver *DriverHarness, ta
 
 func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
 	t.Run("isolation", func(t *testing.T) {
 		caps, err := driver.Capabilities()
-		must.NoError(t, err)
+		require.NoError(t, err)
 
 		isolated := (caps.FSIsolation != drivers.FSIsolationNone)
 
@@ -162,7 +164,7 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
 		w := execTask(t, driver, taskID,
 			fmt.Sprintf(`FILE=$(mktemp); echo "$FILE"; echo %q >> "${FILE}"`, text),
 			false, "")
-		must.Zero(t, w.exitCode)
+		require.Zero(t, w.exitCode)
 
 		tempfile := strings.TrimSpace(w.stdout)
 		if !isolated {
@@ -174,26 +176,26 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
 		// read from host
 		b, err := os.ReadFile(tempfile)
 		if !isolated {
-			must.NoError(t, err)
-			must.Eq(t, text, strings.TrimSpace(string(b)))
+			require.NoError(t, err)
+			require.Equal(t, text, strings.TrimSpace(string(b)))
 		} else {
-			must.Error(t, err)
-			must.True(t, os.IsNotExist(err))
+			require.Error(t, err)
+			require.True(t, os.IsNotExist(err))
 		}
 
 		// read should succeed from task again
 		r := execTask(t, driver, taskID,
 			fmt.Sprintf("cat %q", tempfile),
 			false, "")
-		must.Zero(t, r.exitCode)
-		must.Eq(t, text, strings.TrimSpace(r.stdout))
+		require.Zero(t, r.exitCode)
+		require.Equal(t, text, strings.TrimSpace(r.stdout))
 
 		// we always run in a cgroup - testing freezer cgroup
 		r = execTask(t, driver, taskID,
 			"cat /proc/self/cgroup",
 			false, "",
 		)
-		must.Zero(t, r.exitCode)
+		require.Zero(t, r.exitCode)
 
 		switch cgroupslib.GetMode() {
@@ -212,7 +214,7 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
 				}
 			}
 			if !ok {
-				t.Fatal("unexpected freezer cgroup")
+				require.Fail(t, "unexpected freezer cgroup", "expected freezer to be /nomad/ or /docker/, but found:\n%s", r.stdout)
 			}
 		case cgroupslib.CG2:
 			info, _ := driver.PluginInfo()
@@ -223,7 +225,7 @@ func TestExecFSIsolation(t *testing.T, driver *DriverHarness, taskID string) {
 				t.Skip("/proc/self/cgroup not useful in docker cgroups.v2")
 			}
 			// e.g. 0::/testing.slice/5bdbd6c2-8aba-3ab2-728b-0ff3a81727a9.sleep.scope
-			must.True(t, strings.HasSuffix(strings.TrimSpace(r.stdout), ".scope"), must.Sprintf("actual stdout %q", r.stdout))
+			require.True(t, strings.HasSuffix(strings.TrimSpace(r.stdout), ".scope"), "actual stdout %q", r.stdout)
 		}
 	})
 }
@@ -247,27 +249,27 @@ func execTask(t *testing.T, driver *DriverHarness, taskID string, cmd string, tt
 		isRaw = true
 		err := raw.ExecTaskStreamingRaw(ctx, taskID, command, tty, stream)
-		must.NoError(t, err)
+		require.NoError(t, err)
 	} else if d, ok := driver.impl.(drivers.ExecTaskStreamingDriver); ok {
 		execOpts, errCh := drivers.StreamToExecOptions(ctx, command, tty, stream)
 
 		r, err := d.ExecTaskStreaming(ctx, taskID, execOpts)
-		must.NoError(t, err)
+		require.NoError(t, err)
 
 		select {
 		case err := <-errCh:
-			must.NoError(t, err)
+			require.NoError(t, err)
 		default:
 			// all good
 		}
 
 		exitCode = r.ExitCode
 	} else {
-		t.Fatal("driver does not support exec")
+		require.Fail(t, "driver does not support exec")
 	}
 
 	result := stream.currentResult()
-	must.NoError(t, result.err)
+	require.NoError(t, result.err)
 
 	if !isRaw {
 		result.exitCode = exitCode
diff --git a/plugins/drivers/testutils/testing.go b/plugins/drivers/testutils/testing.go
index bf645f08f899..9ceb0227dcb6 100644
--- a/plugins/drivers/testutils/testing.go
+++ b/plugins/drivers/testutils/testing.go
@@ -23,10 +23,9 @@ import (
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/hashicorp/nomad/plugins/base"
 	"github.com/hashicorp/nomad/plugins/drivers"
-	"github.com/hashicorp/nomad/plugins/drivers/fsisolation"
 	"github.com/hashicorp/nomad/plugins/shared/hclspec"
 	testing "github.com/mitchellh/go-testing-interface"
-	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 type DriverHarness struct {
@@ -56,7 +55,7 @@ func NewDriverHarness(t testing.T, d drivers.DriverPlugin) *DriverHarness {
 	)
 
 	raw, err := client.Dispense(base.PluginTypeDriver)
-	must.NoError(t, err)
+	require.NoError(t, err, "failed to dispense plugin")
 
 	dClient := raw.(drivers.DriverPlugin)
 	return &DriverHarness{
@@ -81,25 +80,21 @@ func (h *DriverHarness) Kill() {
 // between tests.
 func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func() {
 	dir, err := os.MkdirTemp("", "nomad_driver_harness-")
-	must.NoError(h.t, err)
+	require.NoError(h.t, err)
 
-	mountsDir, err := os.MkdirTemp("", "nomad_driver_harness-mounts-")
-	must.NoError(h.t, err)
-	must.NoError(h.t, os.Chmod(mountsDir, 0755))
-
-	allocDir := allocdir.NewAllocDir(h.logger, dir, mountsDir, t.AllocID)
-	must.NoError(h.t, allocDir.Build())
+	allocDir := allocdir.NewAllocDir(h.logger, dir, t.AllocID)
+	require.NoError(h.t, allocDir.Build())
 
 	t.AllocDir = allocDir.AllocDir
 
 	taskDir := allocDir.NewTaskDir(t.Name)
 
 	caps, err := h.Capabilities()
-	must.NoError(h.t, err)
+	require.NoError(h.t, err)
 
 	fsi := caps.FSIsolation
 	h.logger.Trace("FS isolation", "fsi", fsi)
-	must.NoError(h.t, taskDir.Build(fsi, ci.TinyChroot, t.User))
+	require.NoError(h.t, taskDir.Build(fsi == drivers.FSIsolationChroot, ci.TinyChroot))
 
 	task := &structs.Task{
 		Name: t.Name,
@@ -147,7 +142,7 @@ func (h *DriverHarness) MkAllocDir(t *drivers.TaskConfig, enableLogs bool) func(
 		MaxFiles:      10,
 		MaxFileSizeMB: 10,
 	})
-	must.NoError(h.t, err)
+	require.NoError(h.t, err)
 
 	return func() {
 		lm.Stop()
@@ -256,7 +251,7 @@ func (d *MockDriver) ExecTaskStreaming(ctx context.Context, taskID string, execO
 }
 
 // SetEnvvars sets path and host env vars depending on the FS isolation used.
-func SetEnvvars(envBuilder *taskenv.Builder, fsmode fsisolation.Mode, taskDir *allocdir.TaskDir) { +func SetEnvvars(envBuilder *taskenv.Builder, fsi drivers.FSIsolation, taskDir *allocdir.TaskDir) { envBuilder.SetClientTaskRoot(taskDir.Dir) envBuilder.SetClientSharedAllocDir(taskDir.SharedAllocDir) @@ -264,13 +259,8 @@ func SetEnvvars(envBuilder *taskenv.Builder, fsmode fsisolation.Mode, taskDir *a envBuilder.SetClientTaskSecretsDir(taskDir.SecretsDir) // Set driver-specific environment variables - switch fsmode { - case fsisolation.Unveil: - // Use mounts host paths - envBuilder.SetAllocDir(filepath.Join(taskDir.MountsAllocDir, "alloc")) - envBuilder.SetTaskLocalDir(filepath.Join(taskDir.MountsTaskDir, "local")) - envBuilder.SetSecretsDir(filepath.Join(taskDir.SecretsDir, "secrets")) - case fsisolation.None: + switch fsi { + case drivers.FSIsolationNone: // Use host paths envBuilder.SetAllocDir(taskDir.SharedAllocDir) envBuilder.SetTaskLocalDir(taskDir.LocalDir) @@ -283,7 +273,7 @@ func SetEnvvars(envBuilder *taskenv.Builder, fsmode fsisolation.Mode, taskDir *a } // Set the host environment variables for non-image based drivers - if fsmode != fsisolation.Image { + if fsi != drivers.FSIsolationImage { envBuilder.SetHostEnvvars([]string{"env.denylist"}) } } diff --git a/plugins/drivers/testutils/testing_test.go b/plugins/drivers/testutils/testing_test.go index e505e7d6a7f3..92806337264a 100644 --- a/plugins/drivers/testutils/testing_test.go +++ b/plugins/drivers/testutils/testing_test.go @@ -11,12 +11,12 @@ import ( "testing" "time" - "github.com/hashicorp/go-msgpack/v2/codec" + "github.com/hashicorp/go-msgpack/codec" "github.com/hashicorp/nomad/ci" "github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers" pstructs "github.com/hashicorp/nomad/plugins/shared/structs" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) var _ drivers.DriverPlugin = (*MockDriver)(nil) @@ -34,8 +34,8 @@ func TestDriverHarness(t *testing.T) { harness := NewDriverHarness(t, d) defer harness.Kill() actual, _, err := harness.StartTask(&drivers.TaskConfig{}) - must.NoError(t, err) - must.Eq(t, handle.Config.Name, actual.Config.Name) + require.NoError(t, err) + require.Equal(t, handle.Config.Name, actual.Config.Name) } type testDriverState struct { @@ -45,6 +45,7 @@ type testDriverState struct { func TestBaseDriver_Fingerprint(t *testing.T) { ci.Parallel(t) + require := require.New(t) fingerprints := []*drivers.Fingerprint{ { @@ -80,7 +81,7 @@ func TestBaseDriver_Fingerprint(t *testing.T) { defer harness.Kill() ch, err := harness.Fingerprint(context.Background()) - must.NoError(t, err) + require.NoError(err) var wg sync.WaitGroup wg.Add(1) @@ -88,24 +89,25 @@ func TestBaseDriver_Fingerprint(t *testing.T) { defer wg.Done() select { case f := <-ch: - must.Eq(t, f, fingerprints[0]) + require.Exactly(f, fingerprints[0]) case <-time.After(1 * time.Second): - t.Fatal("did not receive fingerprint[0]") + require.Fail("did not receive fingerprint[0]") } select { case f := <-ch: - must.Eq(t, f, fingerprints[1]) + require.Exactly(f, fingerprints[1]) case <-time.After(1 * time.Second): - t.Fatal("did not receive fingerprint[1]") + require.Fail("did not receive fingerprint[1]") } }() - must.False(t, complete.Load().(bool)) + require.False(complete.Load().(bool)) wg.Wait() - must.True(t, complete.Load().(bool)) + require.True(complete.Load().(bool)) } func TestBaseDriver_RecoverTask(t *testing.T) { ci.Parallel(t) + require := require.New(t) // build driver state and encode it 
into proto msg state := testDriverState{Pid: 1, Log: "foo"} @@ -117,8 +119,8 @@ func TestBaseDriver_RecoverTask(t *testing.T) { impl := &MockDriver{ RecoverTaskF: func(h *drivers.TaskHandle) error { var actual testDriverState - must.NoError(t, h.GetDriverState(&actual)) - must.Eq(t, state, actual) + require.NoError(h.GetDriverState(&actual)) + require.Equal(state, actual) return nil }, } @@ -130,11 +132,12 @@ func TestBaseDriver_RecoverTask(t *testing.T) { DriverState: buf.Bytes(), } err := harness.RecoverTask(handle) - must.NoError(t, err) + require.NoError(err) } func TestBaseDriver_StartTask(t *testing.T) { ci.Parallel(t) + require := require.New(t) cfg := &drivers.TaskConfig{ ID: "foo", @@ -154,18 +157,19 @@ func TestBaseDriver_StartTask(t *testing.T) { harness := NewDriverHarness(t, impl) defer harness.Kill() resp, _, err := harness.StartTask(cfg) - must.NoError(t, err) - must.Eq(t, cfg.ID, resp.Config.ID) - must.Eq(t, handle.State, resp.State) + require.NoError(err) + require.Equal(cfg.ID, resp.Config.ID) + require.Equal(handle.State, resp.State) var actualState testDriverState - must.NoError(t, resp.GetDriverState(&actualState)) - must.Eq(t, *state, actualState) + require.NoError(resp.GetDriverState(&actualState)) + require.Equal(*state, actualState) } func TestBaseDriver_WaitTask(t *testing.T) { ci.Parallel(t) + require := require.New(t) result := &drivers.ExitResult{ExitCode: 1, Signal: 9} @@ -190,19 +194,20 @@ func TestBaseDriver_WaitTask(t *testing.T) { go func() { defer wg.Done() ch, err := harness.WaitTask(context.TODO(), "foo") - must.NoError(t, err) + require.NoError(err) actualResult := <-ch finished = true - must.Eq(t, result, actualResult) + require.Exactly(result, actualResult) }() - must.False(t, finished) + require.False(finished) close(signalTask) wg.Wait() - must.True(t, finished) + require.True(finished) } func TestBaseDriver_TaskEvents(t *testing.T) { ci.Parallel(t) + require := require.New(t) now := time.Now().UTC().Truncate(time.Millisecond) events := []*drivers.TaskEvent{ @@ -249,14 +254,14 @@ func TestBaseDriver_TaskEvents(t *testing.T) { defer harness.Kill() ch, err := harness.TaskEvents(context.Background()) - must.NoError(t, err) + require.NoError(err) for _, event := range events { select { case actual := <-ch: - must.Eq(t, actual, event) + require.Exactly(actual, event) case <-time.After(500 * time.Millisecond): - t.Fatal("failed to receive event") + require.Fail("failed to receive event") } } @@ -286,6 +291,6 @@ func TestBaseDriver_Capabilities(t *testing.T) { defer harness.Kill() caps, err := harness.Capabilities() - must.NoError(t, err) - must.Eq(t, capabilities, caps) + require.NoError(t, err) + require.Equal(t, capabilities, caps) } diff --git a/plugins/drivers/utils.go b/plugins/drivers/utils.go index 557493822652..39b1f0843ab8 100644 --- a/plugins/drivers/utils.go +++ b/plugins/drivers/utils.go @@ -294,7 +294,6 @@ func MountFromProto(mount *proto.Mount) *MountConfig { HostPath: mount.HostPath, Readonly: mount.Readonly, PropagationMode: mount.PropagationMode, - SELinuxLabel: mount.SelinuxLabel, } } @@ -346,7 +345,6 @@ func MountToProto(mount *MountConfig) *proto.Mount { HostPath: mount.HostPath, Readonly: mount.Readonly, PropagationMode: mount.PropagationMode, - SelinuxLabel: mount.SELinuxLabel, } } diff --git a/plugins/drivers/utils_test.go b/plugins/drivers/utils_test.go index 14267e98110b..e64bf5102c3f 100644 --- a/plugins/drivers/utils_test.go +++ b/plugins/drivers/utils_test.go @@ -9,7 +9,8 @@ import ( "github.com/hashicorp/nomad/helper/uuid" 
"github.com/hashicorp/nomad/nomad/structs" "github.com/hashicorp/nomad/plugins/drivers/proto" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestResourceUsageRoundTrip(t *testing.T) { @@ -35,7 +36,8 @@ func TestResourceUsageRoundTrip(t *testing.T) { } parsed := resourceUsageFromProto(resourceUsageToProto(input)) - must.Eq(t, parsed, input) + + require.EqualValues(t, parsed, input) } func TestTaskConfigRoundTrip(t *testing.T) { @@ -107,7 +109,8 @@ func TestTaskConfigRoundTrip(t *testing.T) { } parsed := taskConfigFromProto(taskConfigToProto(input)) - must.Eq(t, input, parsed) + + require.EqualValues(t, input, parsed) } @@ -137,7 +140,7 @@ func Test_networkCreateRequestFromProto(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { actualOutput := networkCreateRequestFromProto(tc.inputPB) - must.Eq(t, tc.expectedOutput, actualOutput) + assert.Equal(t, tc.expectedOutput, actualOutput, tc.name) }) } } diff --git a/plugins/shared/structs/attribute_test.go b/plugins/shared/structs/attribute_test.go index 1a01e7cf1bc3..22a342056f83 100644 --- a/plugins/shared/structs/attribute_test.go +++ b/plugins/shared/structs/attribute_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/hashicorp/nomad/helper/pointer" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestAttribute_Validate(t *testing.T) { @@ -77,7 +77,7 @@ func TestAttribute_Validate(t *testing.T) { for _, c := range cases { t.Run(c.Input.GoString(), func(t *testing.T) { if err := c.Input.Validate(); err != nil && !c.Fail { - must.NoError(t, err) + require.NoError(t, err) } }) } @@ -538,7 +538,7 @@ func testComparison(t *testing.T, cases []*compareTestCase) { if !ok && !c.NotComparable { t.Fatal("should be comparable") } else if ok { - must.Eq(t, c.Expected, v) + require.Equal(t, c.Expected, v) } }) } @@ -662,8 +662,8 @@ func TestAttribute_ParseAndValidate(t *testing.T) { for _, c := range cases { t.Run(c.Input, func(t *testing.T) { a := ParseAttribute(c.Input) - must.Eq(t, c.Expected, a) - must.NoError(t, a.Validate()) + require.Equal(t, c.Expected, a) + require.NoError(t, a.Validate()) }) } } diff --git a/scheduler/feasible_test.go b/scheduler/feasible_test.go index f552b70c9f34..9c4990ae4382 100644 --- a/scheduler/feasible_test.go +++ b/scheduler/feasible_test.go @@ -14,8 +14,9 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" psstructs "github.com/hashicorp/nomad/plugins/shared/structs" - "github.com/shoenig/test" "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestStaticIterator_Reset(t *testing.T) { @@ -353,7 +354,7 @@ func TestCSIVolumeChecker(t *testing.T) { index := uint64(999) for _, node := range nodes { err := state.UpsertNode(structs.MsgTypeTestSetup, index, node) - must.NoError(t, err) + require.NoError(t, err) index++ } @@ -369,7 +370,7 @@ func TestCSIVolumeChecker(t *testing.T) { {Segments: map[string]string{"rack": "R2"}}, } err := state.UpsertCSIVolume(index, []*structs.CSIVolume{vol}) - must.NoError(t, err) + require.NoError(t, err) index++ // Create some other volumes in use on nodes[3] to trip MaxVolumes @@ -380,14 +381,14 @@ func TestCSIVolumeChecker(t *testing.T) { vol2.AccessMode = structs.CSIVolumeAccessModeMultiNodeSingleWriter vol2.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol2}) - must.NoError(t, 
err) + require.NoError(t, err) index++ vid3 := "volume-id[0]" vol3 := vol.Copy() vol3.ID = vid3 err = state.UpsertCSIVolume(index, []*structs.CSIVolume{vol3}) - must.NoError(t, err) + require.NoError(t, err) index++ alloc := mock.Alloc() @@ -400,13 +401,13 @@ func TestCSIVolumeChecker(t *testing.T) { }, } err = state.UpsertJob(structs.MsgTypeTestSetup, index, nil, alloc.Job) - must.NoError(t, err) + require.NoError(t, err) index++ summary := mock.JobSummary(alloc.JobID) - must.NoError(t, state.UpsertJobSummary(index, summary)) + require.NoError(t, state.UpsertJobSummary(index, summary)) index++ err = state.UpsertAllocs(structs.MsgTypeTestSetup, index, []*structs.Allocation{alloc}) - must.NoError(t, err) + require.NoError(t, err) index++ // Create volume requests @@ -498,7 +499,7 @@ func TestCSIVolumeChecker(t *testing.T) { for _, c := range cases { checker.SetVolumes(alloc.Name, c.RequestedVolumes) - test.Eq(t, c.Result, checker.Feasible(c.Node), test.Sprint(c.Name)) + assert.Equal(t, c.Result, checker.Feasible(c.Node), c.Name) } // add a missing volume @@ -514,7 +515,7 @@ func TestCSIVolumeChecker(t *testing.T) { for _, node := range nodes { checker.SetVolumes(alloc.Name, volumes) act := checker.Feasible(node) - must.False(t, act, must.Sprint("request with missing volume should never be feasible")) + require.False(t, act, "request with missing volume should never be feasible") } } @@ -660,7 +661,7 @@ func TestNetworkChecker(t *testing.T) { for _, c := range cases { checker.SetNetwork(c.network) for i, node := range nodes { - must.Eq(t, c.results[i], checker.Feasible(node), must.Sprintf("mode=%q, idx=%d", c.network.Mode, i)) + require.Equal(t, c.results[i], checker.Feasible(node), "mode=%q, idx=%d", c.network.Mode, i) } } } @@ -680,7 +681,7 @@ func TestNetworkChecker_bridge_upgrade_path(t *testing.T) { checker.SetNetwork(&structs.NetworkResource{Mode: "bridge"}) ok := checker.Feasible(oldClient) - must.True(t, ok) + require.True(t, ok) }) t.Run("updated client", func(t *testing.T) { @@ -693,7 +694,7 @@ func TestNetworkChecker_bridge_upgrade_path(t *testing.T) { checker.SetNetwork(&structs.NetworkResource{Mode: "bridge"}) ok := checker.Feasible(oldClient) - must.False(t, ok) + require.False(t, ok) }) } @@ -804,6 +805,7 @@ func TestDriverChecker_Compatibility(t *testing.T) { func Test_HealthChecks(t *testing.T) { ci.Parallel(t) + require := require.New(t) _, ctx := testContext(t) nodes := []*structs.Node{ @@ -861,7 +863,7 @@ func Test_HealthChecks(t *testing.T) { } checker := NewDriverChecker(ctx, drivers) act := checker.Feasible(c.Node) - must.Eq(t, act, c.Result) + require.Equal(act, c.Result) } } @@ -1346,7 +1348,7 @@ func TestCheckSemverConstraint(t *testing.T) { _, ctx := testContext(t) p := newSemverConstraintParser(ctx) actual := checkVersionMatch(ctx, p, tc.lVal, tc.rVal) - must.Eq(t, tc.result, actual) + require.Equal(t, tc.result, actual) }) } } @@ -2637,11 +2639,11 @@ func TestFeasibilityWrapper_JobEligible_TgEscaped(t *testing.T) { func TestSetContainsAny(t *testing.T) { ci.Parallel(t) - must.True(t, checkSetContainsAny("a", "a")) - must.True(t, checkSetContainsAny("a,b", "a")) - must.True(t, checkSetContainsAny(" a,b ", "a ")) - must.True(t, checkSetContainsAny("a", "a")) - must.False(t, checkSetContainsAny("b", "a")) + require.True(t, checkSetContainsAny("a", "a")) + require.True(t, checkSetContainsAny("a,b", "a")) + require.True(t, checkSetContainsAny(" a,b ", "a ")) + require.True(t, checkSetContainsAny("a", "a")) + require.False(t, checkSetContainsAny("b", "a")) } func 
TestDeviceChecker(t *testing.T) { diff --git a/scheduler/generic_sched_test.go b/scheduler/generic_sched_test.go index 8a330c3087d1..edbca23ac3f4 100644 --- a/scheduler/generic_sched_test.go +++ b/scheduler/generic_sched_test.go @@ -3644,78 +3644,24 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { ci.Parallel(t) cases := []struct { - jobSpecFn func(*structs.Job) + stop time.Duration when time.Time rescheduled bool }{ - // Test using stop_after_client_disconnect, remove after its deprecated in favor - // of Disconnect.StopOnClientAfter introduced in 1.8.0. { rescheduled: true, - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].StopAfterClientDisconnect = nil - }, }, { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].StopAfterClientDisconnect = pointer.Of(1 * time.Second) - }, + stop: 1 * time.Second, rescheduled: false, }, { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].StopAfterClientDisconnect = pointer.Of(1 * time.Second) - }, - when: time.Now().UTC().Add(-10 * time.Second), - rescheduled: true, - }, - { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].StopAfterClientDisconnect = pointer.Of(1 * time.Second) - }, - when: time.Now().UTC().Add(10 * time.Minute), - rescheduled: false, - }, - // Tests using the new disconnect block - { - rescheduled: true, - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - StopOnClientAfter: nil, - } - }, - }, - { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - StopOnClientAfter: pointer.Of(1 * time.Second), - } - }, - rescheduled: false, - }, - { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - StopOnClientAfter: pointer.Of(1 * time.Second), - } - }, + stop: 1 * time.Second, when: time.Now().UTC().Add(-10 * time.Second), rescheduled: true, }, { - jobSpecFn: func(job *structs.Job) { - job.TaskGroups[0].Count = 1 - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - StopOnClientAfter: pointer.Of(1 * time.Second), - } - }, + stop: 1 * time.Second, when: time.Now().UTC().Add(10 * time.Minute), rescheduled: false, }, @@ -3730,9 +3676,10 @@ func TestServiceSched_StopAfterClientDisconnect(t *testing.T) { node.Status = structs.NodeStatusDown require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) + // Job with allocations and stop_after_client_disconnect job := mock.Job() - - tc.jobSpecFn(job) + job.TaskGroups[0].Count = 1 + job.TaskGroups[0].StopAfterClientDisconnect = &tc.stop require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) // Alloc for the running group @@ -7270,160 +7217,129 @@ func TestPropagateTaskState(t *testing.T) { // Tests that a client disconnect generates attribute updates and follow up evals. func TestServiceSched_Client_Disconnect_Creates_Updates_and_Evals(t *testing.T) { + h := NewHarness(t) + count := 1 + maxClientDisconnect := 10 * time.Minute - jobVersions := []struct { - name string - jobSpec func(time.Duration) *structs.Job - }{ - // Test using max_client_disconnect, remove after its deprecated in favor - // of Disconnect.LostAfter introduced in 1.8.0. 
- { - name: "job-with-max-client-disconnect-deprecated", - jobSpec: func(maxClientDisconnect time.Duration) *structs.Job { - job := mock.Job() - job.TaskGroups[0].MaxClientDisconnect = &maxClientDisconnect - - return job - }, - }, - { - name: "job-with-disconnect-block", - jobSpec: func(lostAfter time.Duration) *structs.Job { - job := mock.Job() - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: lostAfter, - } - return job - }, - }, - } - - for _, version := range jobVersions { - t.Run(version.name, func(t *testing.T) { - - h := NewHarness(t) - count := 1 - maxClientDisconnect := 10 * time.Minute - - job := version.jobSpec(maxClientDisconnect) - job.TaskGroups[0].Count = count - require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) - - disconnectedNode, job, unknownAllocs := initNodeAndAllocs(t, h, job, - structs.NodeStatusReady, structs.AllocClientStatusRunning) - - // Now disconnect the node - disconnectedNode.Status = structs.NodeStatusDisconnected - require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), disconnectedNode)) - - // Create an evaluation triggered by the disconnect - evals := []*structs.Evaluation{{ - Namespace: structs.DefaultNamespace, - ID: uuid.Generate(), - Priority: 50, - TriggeredBy: structs.EvalTriggerNodeUpdate, - JobID: job.ID, - NodeID: disconnectedNode.ID, - Status: structs.EvalStatusPending, - }} - - nodeStatusUpdateEval := evals[0] - require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), evals)) + disconnectedNode, job, unknownAllocs := initNodeAndAllocs(t, h, count, maxClientDisconnect, + structs.NodeStatusReady, structs.AllocClientStatusRunning) - // Process the evaluation - err := h.Process(NewServiceScheduler, nodeStatusUpdateEval) - require.NoError(t, err) - require.Equal(t, structs.EvalStatusComplete, h.Evals[0].Status) - require.Len(t, h.Plans, 1, "plan") + // Now disconnect the node + disconnectedNode.Status = structs.NodeStatusDisconnected + require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), disconnectedNode)) - // Two followup delayed eval created - require.Len(t, h.CreateEvals, 2) - followUpEval1 := h.CreateEvals[0] - require.Equal(t, nodeStatusUpdateEval.ID, followUpEval1.PreviousEval) - require.Equal(t, "pending", followUpEval1.Status) - require.NotEmpty(t, followUpEval1.WaitUntil) + // Create an evaluation triggered by the disconnect + evals := []*structs.Evaluation{{ + Namespace: structs.DefaultNamespace, + ID: uuid.Generate(), + Priority: 50, + TriggeredBy: structs.EvalTriggerNodeUpdate, + JobID: job.ID, + NodeID: disconnectedNode.ID, + Status: structs.EvalStatusPending, + }} - followUpEval2 := h.CreateEvals[1] - require.Equal(t, nodeStatusUpdateEval.ID, followUpEval2.PreviousEval) - require.Equal(t, "pending", followUpEval2.Status) - require.NotEmpty(t, followUpEval2.WaitUntil) + nodeStatusUpdateEval := evals[0] + require.NoError(t, h.State.UpsertEvals(structs.MsgTypeTestSetup, h.NextIndex(), evals)) - // Insert eval1 in the state store - testutil.WaitForResult(func() (bool, error) { - found, err := h.State.EvalByID(nil, followUpEval1.ID) - if err != nil { - return false, err - } - if found == nil { - return false, nil - } + // Process the evaluation + err := h.Process(NewServiceScheduler, nodeStatusUpdateEval) + require.NoError(t, err) + require.Equal(t, structs.EvalStatusComplete, h.Evals[0].Status) + require.Len(t, h.Plans, 1, "plan") + + // Two followup delayed eval created + require.Len(t, h.CreateEvals, 
2) + followUpEval1 := h.CreateEvals[0] + require.Equal(t, nodeStatusUpdateEval.ID, followUpEval1.PreviousEval) + require.Equal(t, "pending", followUpEval1.Status) + require.NotEmpty(t, followUpEval1.WaitUntil) + + followUpEval2 := h.CreateEvals[1] + require.Equal(t, nodeStatusUpdateEval.ID, followUpEval2.PreviousEval) + require.Equal(t, "pending", followUpEval2.Status) + require.NotEmpty(t, followUpEval2.WaitUntil) + + // Insert eval1 in the state store + testutil.WaitForResult(func() (bool, error) { + found, err := h.State.EvalByID(nil, followUpEval1.ID) + if err != nil { + return false, err + } + if found == nil { + return false, nil + } - require.Equal(t, nodeStatusUpdateEval.ID, found.PreviousEval) - require.Equal(t, "pending", found.Status) - require.NotEmpty(t, found.WaitUntil) + require.Equal(t, nodeStatusUpdateEval.ID, found.PreviousEval) + require.Equal(t, "pending", found.Status) + require.NotEmpty(t, found.WaitUntil) - return true, nil - }, func(err error) { + return true, nil + }, func(err error) { - require.NoError(t, err) - }) + require.NoError(t, err) + }) - // Insert eval2 in the state store - testutil.WaitForResult(func() (bool, error) { - found, err := h.State.EvalByID(nil, followUpEval2.ID) - if err != nil { - return false, err - } - if found == nil { - return false, nil - } + // Insert eval2 in the state store + testutil.WaitForResult(func() (bool, error) { + found, err := h.State.EvalByID(nil, followUpEval2.ID) + if err != nil { + return false, err + } + if found == nil { + return false, nil + } - require.Equal(t, nodeStatusUpdateEval.ID, found.PreviousEval) - require.Equal(t, "pending", found.Status) - require.NotEmpty(t, found.WaitUntil) + require.Equal(t, nodeStatusUpdateEval.ID, found.PreviousEval) + require.Equal(t, "pending", found.Status) + require.NotEmpty(t, found.WaitUntil) - return true, nil - }, func(err error) { + return true, nil + }, func(err error) { - require.NoError(t, err) - }) + require.NoError(t, err) + }) - // Validate that the ClientStatus updates are part of the plan. - require.Len(t, h.Plans[0].NodeAllocation[disconnectedNode.ID], count) - // Pending update should have unknown status. + // Validate that the ClientStatus updates are part of the plan. + require.Len(t, h.Plans[0].NodeAllocation[disconnectedNode.ID], count) + // Pending update should have unknown status. - for _, nodeAlloc := range h.Plans[0].NodeAllocation[disconnectedNode.ID] { - require.Equal(t, nodeAlloc.ClientStatus, structs.AllocClientStatusUnknown) - } + for _, nodeAlloc := range h.Plans[0].NodeAllocation[disconnectedNode.ID] { + require.Equal(t, nodeAlloc.ClientStatus, structs.AllocClientStatusUnknown) + } - // Simulate that NodeAllocation got processed. - err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), h.Plans[0].NodeAllocation[disconnectedNode.ID]) - require.NoError(t, err, "plan.NodeUpdate") + // Simulate that NodeAllocation got processed. + err = h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), h.Plans[0].NodeAllocation[disconnectedNode.ID]) + require.NoError(t, err, "plan.NodeUpdate") - // Validate that the StateStore Upsert applied the ClientStatus we specified. + // Validate that the StateStore Upsert applied the ClientStatus we specified. 
- for _, alloc := range unknownAllocs { - alloc, err = h.State.AllocByID(nil, alloc.ID) - require.NoError(t, err) - require.Equal(t, alloc.ClientStatus, structs.AllocClientStatusUnknown) + for _, alloc := range unknownAllocs { + alloc, err = h.State.AllocByID(nil, alloc.ID) + require.NoError(t, err) + require.Equal(t, alloc.ClientStatus, structs.AllocClientStatusUnknown) - // Allocations have been transitioned to unknown - require.Equal(t, structs.AllocDesiredStatusRun, alloc.DesiredStatus) - require.Equal(t, structs.AllocClientStatusUnknown, alloc.ClientStatus) - } - }) + // Allocations have been transitioned to unknown + require.Equal(t, structs.AllocDesiredStatusRun, alloc.DesiredStatus) + require.Equal(t, structs.AllocClientStatusUnknown, alloc.ClientStatus) } } -func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, - nodeStatus, clientStatus string) (*structs.Node, *structs.Job, []*structs.Allocation) { +func initNodeAndAllocs(t *testing.T, h *Harness, allocCount int, + maxClientDisconnect time.Duration, nodeStatus, clientStatus string) (*structs.Node, *structs.Job, []*structs.Allocation) { // Node, which is ready node := mock.Node() node.Status = nodeStatus require.NoError(t, h.State.UpsertNode(structs.MsgTypeTestSetup, h.NextIndex(), node)) - allocs := make([]*structs.Allocation, job.TaskGroups[0].Count) - for i := 0; i < job.TaskGroups[0].Count; i++ { + // Job with allocations and max_client_disconnect + job := mock.Job() + job.TaskGroups[0].Count = allocCount + job.TaskGroups[0].MaxClientDisconnect = &maxClientDisconnect + require.NoError(t, h.State.UpsertJob(structs.MsgTypeTestSetup, h.NextIndex(), nil, job)) + + allocs := make([]*structs.Allocation, allocCount) + for i := 0; i < allocCount; i++ { // Alloc for the running group alloc := mock.Alloc() alloc.Job = job @@ -7438,5 +7354,4 @@ func initNodeAndAllocs(t *testing.T, h *Harness, job *structs.Job, require.NoError(t, h.State.UpsertAllocs(structs.MsgTypeTestSetup, h.NextIndex(), allocs)) return node, job, allocs - } diff --git a/scheduler/reconcile.go b/scheduler/reconcile.go index 7f94c1830721..3ab3cc760c8c 100644 --- a/scheduler/reconcile.go +++ b/scheduler/reconcile.go @@ -19,7 +19,6 @@ import ( "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/helper/uuid" "github.com/hashicorp/nomad/nomad/structs" - reconnectingpicker "github.com/hashicorp/nomad/scheduler/reconnecting_picker" ) const ( @@ -33,10 +32,6 @@ const ( rescheduleWindowSize = 1 * time.Second ) -type ReconnectingPicker interface { - PickReconnectingAlloc(disconnect *structs.DisconnectStrategy, original *structs.Allocation, replacement *structs.Allocation) *structs.Allocation -} - // allocUpdateType takes an existing allocation and a new job definition and // returns whether the allocation can ignore the change, requires a destructive // update, or can be inplace updated. If it can be inplace updated, an updated @@ -107,8 +102,6 @@ type allocReconciler struct { // defaults to time.Now, and overridden in unit tests now time.Time - reconnectingPicker ReconnectingPicker - // result is the results of the reconcile. 
During computation it can be // used to store intermediate state result *reconcileResults @@ -202,7 +195,6 @@ func NewAllocReconciler(logger log.Logger, allocUpdateFn allocUpdateType, batch jobID string, job *structs.Job, deployment *structs.Deployment, existingAllocs []*structs.Allocation, taintedNodes map[string]*structs.Node, evalID string, evalPriority int, supportsDisconnectedClients bool, opts ...AllocReconcilerOption) *allocReconciler { - ar := &allocReconciler{ logger: logger.Named("reconciler"), allocUpdateFn: allocUpdateFn, @@ -224,7 +216,6 @@ func NewAllocReconciler(logger log.Logger, allocUpdateFn allocUpdateType, batch desiredFollowupEvals: make(map[string][]*structs.Evaluation), taskGroupAllocNameIndexes: make(map[string]*allocNameIndex), }, - reconnectingPicker: reconnectingpicker.New(logger), } for _, op := range opts { @@ -470,7 +461,7 @@ func (a *allocReconciler) computeGroup(groupName string, all allocSet) bool { if len(reconnecting) > 0 { // Pass all allocations because the replacements we need to find may be // in any state, including themselves being reconnected. - reconnect, stop := a.reconcileReconnecting(reconnecting, all, tg) + reconnect, stop := a.reconcileReconnecting(reconnecting, all) // Stop the reconciled allocations and remove them from the other sets // since they have been already handled. @@ -507,7 +498,7 @@ func (a *allocReconciler) computeGroup(groupName string, all allocSet) bool { // If MaxClientDisconnect is enabled as well as tg.PreventRescheduleOnLost, // the reschedule policy won't be enabled and the lost allocations // wont be rescheduled, and PreventRescheduleOnLost is ignored. - if tg.GetDisconnectLostTimeout() != 0 { + if tg.MaxClientDisconnect != nil { untaintedDisconnecting, rescheduleDisconnecting, laterDisconnecting := disconnecting.filterByRescheduleable(a.batch, true, a.now, a.evalID, a.deployment) rescheduleNow = rescheduleNow.union(rescheduleDisconnecting) @@ -1154,7 +1145,7 @@ func (a *allocReconciler) computeStop(group *structs.TaskGroup, nameIndex *alloc // - If the reconnecting allocation is to be stopped, its replacements may // not be present in any of the returned sets. The rest of the reconciler // logic will handle them. -func (a *allocReconciler) reconcileReconnecting(reconnecting allocSet, all allocSet, tg *structs.TaskGroup) (allocSet, allocSet) { +func (a *allocReconciler) reconcileReconnecting(reconnecting allocSet, all allocSet) (allocSet, allocSet) { stop := make(allocSet) reconnect := make(allocSet) @@ -1208,8 +1199,8 @@ func (a *allocReconciler) reconcileReconnecting(reconnecting allocSet, all alloc continue } - // Pick which allocation we want to keep using the disconnect reconcile strategy - keepAlloc := a.reconnectingPicker.PickReconnectingAlloc(tg.Disconnect, reconnectingAlloc, replacementAlloc) + // Pick which allocation we want to keep. + keepAlloc := pickReconnectingAlloc(reconnectingAlloc, replacementAlloc) if keepAlloc == replacementAlloc { // The replacement allocation is preferred, so stop the one // reconnecting if not stopped yet. @@ -1244,6 +1235,44 @@ func (a *allocReconciler) reconcileReconnecting(reconnecting allocSet, all alloc return reconnect, stop } +// pickReconnectingAlloc returns the allocation to keep between the original +// one that is reconnecting and one of its replacements. +// +// This function is not commutative, meaning that pickReconnectingAlloc(A, B) +// is not the same as pickReconnectingAlloc(B, A). Preference is given to keep +// the original allocation when possible. 
+func pickReconnectingAlloc(original *structs.Allocation, replacement *structs.Allocation) *structs.Allocation { + // Check if the replacement is newer. + // Always prefer the replacement if true. + replacementIsNewer := replacement.Job.Version > original.Job.Version || + replacement.Job.CreateIndex > original.Job.CreateIndex + if replacementIsNewer { + return replacement + } + + // Check if the replacement has better placement score. + // If any of the scores is not available, only pick the replacement if + // itself does have scores. + originalMaxScoreMeta := original.Metrics.MaxNormScore() + replacementMaxScoreMeta := replacement.Metrics.MaxNormScore() + + replacementHasBetterScore := originalMaxScoreMeta == nil && replacementMaxScoreMeta != nil || + (originalMaxScoreMeta != nil && replacementMaxScoreMeta != nil && + replacementMaxScoreMeta.NormScore > originalMaxScoreMeta.NormScore) + + // Check if the replacement has better client status. + // Even with a better placement score make sure we don't replace a running + // allocation with one that is not. + replacementIsRunning := replacement.ClientStatus == structs.AllocClientStatusRunning + originalNotRunning := original.ClientStatus != structs.AllocClientStatusRunning + + if replacementHasBetterScore && (replacementIsRunning || originalNotRunning) { + return replacement + } + + return original +} + // computeUpdates determines which allocations for the passed group require // updates. Three groups are returned: // 1. Those that require no upgrades diff --git a/scheduler/reconcile_test.go b/scheduler/reconcile_test.go index 35e97e2f4509..3e13276a8db4 100644 --- a/scheduler/reconcile_test.go +++ b/scheduler/reconcile_test.go @@ -19,8 +19,9 @@ import ( "github.com/hashicorp/nomad/nomad/mock" "github.com/hashicorp/nomad/nomad/structs" "github.com/kr/pretty" - "github.com/shoenig/test" "github.com/shoenig/test/must" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var ( @@ -269,6 +270,7 @@ type resultExpectation struct { func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) { t.Helper() + assertion := assert.New(t) if exp.createDeployment != nil && r.deployment == nil { t.Errorf("Expect a created deployment got none") @@ -283,15 +285,15 @@ func assertResults(t *testing.T, r *reconcileResults, exp *resultExpectation) { } } - test.Eq(t, exp.deploymentUpdates, r.deploymentUpdates, test.Sprint("Expected Deployment Updates")) - test.SliceLen(t, exp.place, r.place, test.Sprint("Expected Placements")) - test.SliceLen(t, exp.destructive, r.destructiveUpdate, test.Sprint("Expected Destructive")) - test.SliceLen(t, exp.inplace, r.inplaceUpdate, test.Sprint("Expected Inplace Updates")) - test.MapLen(t, exp.attributeUpdates, r.attributeUpdates, test.Sprint("Expected Attribute Updates")) - test.MapLen(t, exp.reconnectUpdates, r.reconnectUpdates, test.Sprint("Expected Reconnect Updates")) - test.MapLen(t, exp.disconnectUpdates, r.disconnectUpdates, test.Sprint("Expected Disconnect Updates")) - test.SliceLen(t, exp.stop, r.stop, test.Sprint("Expected Stops")) - test.Eq(t, exp.desiredTGUpdates, r.desiredTGUpdates, test.Sprint("Expected Desired TG Update Annotations")) + assertion.EqualValues(exp.deploymentUpdates, r.deploymentUpdates, "Expected Deployment Updates") + assertion.Len(r.place, exp.place, "Expected Placements") + assertion.Len(r.destructiveUpdate, exp.destructive, "Expected Destructive") + assertion.Len(r.inplaceUpdate, exp.inplace, "Expected Inplace Updates") + 
assertion.Len(r.attributeUpdates, exp.attributeUpdates, "Expected Attribute Updates") + assertion.Len(r.reconnectUpdates, exp.reconnectUpdates, "Expected Reconnect Updates") + assertion.Len(r.disconnectUpdates, exp.disconnectUpdates, "Expected Disconnect Updates") + assertion.Len(r.stop, exp.stop, "Expected Stops") + assertion.EqualValues(exp.desiredTGUpdates, r.desiredTGUpdates, "Expected Desired TG Update Annotations") } func buildAllocations(job *structs.Job, count int, clientStatus, desiredStatus string, nodeScore float64) []*structs.Allocation { @@ -337,9 +339,7 @@ func buildDisconnectedNodes(allocs []*structs.Allocation, count int) map[string] func buildResumableAllocations(count int, clientStatus, desiredStatus string, nodeScore float64) (*structs.Job, []*structs.Allocation) { job := mock.Job() - job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: 5 * time.Minute, - } + job.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Minute) job.TaskGroups[0].Count = count return job, buildAllocations(job, count, clientStatus, desiredStatus, nodeScore) @@ -726,7 +726,7 @@ func TestReconciler_Inplace_Rollback(t *testing.T) { }, }) - test.MapLen(t, 1, r.desiredFollowupEvals, test.Sprint("expected 1 follow-up eval")) + assert.Len(t, r.desiredFollowupEvals, 1, "expected 1 follow-up eval") assertNamesHaveIndexes(t, intRange(0, 0), allocsToNames(r.inplaceUpdate)) assertNamesHaveIndexes(t, intRange(2, 2), stopResultsToNames(r.stop)) assertNamesHaveIndexes(t, intRange(2, 3), placeResultsToNames(r.place)) @@ -893,8 +893,6 @@ func TestReconciler_Destructive_ScaleDown(t *testing.T) { // PreventRescheduleOnLost, MaxClientDisconnect and ReschedulePolicy. // Having the 3 configurations enabled is not a valid option and is not // included in the test. -// Test using max_client_disconnect, remove after its deprecated in favor -// of Disconnect.LostAfter introduced in 1.8.0. 
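// buildResumableAllocations above re-populates TaskGroup.MaxClientDisconnect
// via pointer.Of. Assuming the helper in
// github.com/hashicorp/nomad/helper/pointer has roughly this generic shape;
// sketched here because optional fields like MaxClientDisconnect rely on nil
// to mean "feature disabled", so a literal cannot feed them directly.
package pointer

// Of returns a pointer to a copy of v, letting expressions feed optional
// (*T) config fields inline, e.g. pointer.Of(5 * time.Minute).
func Of[T any](v T) *T {
	return &v
}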
func TestReconciler_LostNode_PreventRescheduleOnLost(t *testing.T) { disabledReschedulePolicy := &structs.ReschedulePolicy{ Attempts: 0, @@ -942,7 +940,7 @@ func TestReconciler_LostNode_PreventRescheduleOnLost(t *testing.T) { maxClientDisconnect: pointer.Of(10 * time.Second), PreventRescheduleOnLost: false, reschedulePolicy: disabledReschedulePolicy, - expectPlace: 1, + expectPlace: 2, expectStop: 1, expectIgnore: 4, expectDisconnect: 1, @@ -953,12 +951,13 @@ func TestReconciler_LostNode_PreventRescheduleOnLost(t *testing.T) { maxClientDisconnect: pointer.Of(10 * time.Second), PreventRescheduleOnLost: true, reschedulePolicy: disabledReschedulePolicy, - expectPlace: 0, + expectPlace: 1, // This behaviour needs to be verified expectStop: 0, expectIgnore: 5, expectDisconnect: 2, allocStatus: structs.AllocClientStatusUnknown, }, + { name: "PreventRescheduleOnLost off, MaxClientDisconnect off, Reschedule on", maxClientDisconnect: nil, @@ -992,8 +991,8 @@ func TestReconciler_LostNode_PreventRescheduleOnLost(t *testing.T) { Attempts: 1, }, expectPlace: 3, - expectStop: 2, - expectIgnore: 2, + expectStop: 1, + expectIgnore: 3, expectDisconnect: 1, allocStatus: structs.AllocClientStatusLost, }, @@ -1539,7 +1538,7 @@ func TestReconciler_JobStopped_TerminalAllocs(t *testing.T) { reconciler := NewAllocReconciler(testlog.HCLogger(t), allocUpdateFnIgnore, false, c.jobID, c.job, nil, allocs, nil, "", 50, true) r := reconciler.Compute() - must.SliceEmpty(t, r.stop) + require.Len(t, r.stop, 0) // Assert the correct results assertResults(t, r, &resultExpectation{ createDeployment: nil, @@ -1656,6 +1655,8 @@ func TestReconciler_MultiTG_SingleUpdateBlock(t *testing.T) { func TestReconciler_RescheduleLater_Batch(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 4 job := mock.Job() job.TaskGroups[0].Count = 4 @@ -1714,9 +1715,9 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) { // Two reschedule attempts were already made, one more can be made at a future time // Verify that the follow up eval has the expected waitUntil time evals := r.desiredFollowupEvals[tgName] - must.NotNil(t, evals) - must.SliceLen(t, 1, evals) - must.Eq(t, now.Add(delayDur), evals[0].WaitUntil) + require.NotNil(evals) + require.Equal(1, len(evals)) + require.Equal(now.Add(delayDur), evals[0].WaitUntil) // Alloc 5 should not be replaced because it is terminal assertResults(t, r, &resultExpectation{ @@ -1742,7 +1743,7 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) { for _, a := range r.attributeUpdates { annotated = a } - must.Eq(t, evals[0].ID, annotated.FollowupEvalID) + require.Equal(evals[0].ID, annotated.FollowupEvalID) } // Tests delayed rescheduling of failed batch allocations and batching of allocs @@ -1750,6 +1751,8 @@ func TestReconciler_RescheduleLater_Batch(t *testing.T) { func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 4 job := mock.Job() job.TaskGroups[0].Count = 10 @@ -1794,13 +1797,13 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { // Verify that two follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.NotNil(t, evals) - must.SliceLen(t, 2, evals) + require.NotNil(evals) + require.Equal(2, len(evals)) // Verify expected WaitUntil values for both batched evals - must.Eq(t, now.Add(delayDur), evals[0].WaitUntil) + require.Equal(now.Add(delayDur), evals[0].WaitUntil) secondBatchDuration := delayDur + 10*time.Second - must.Eq(t, 
now.Add(secondBatchDuration), evals[1].WaitUntil) + require.Equal(now.Add(secondBatchDuration), evals[1].WaitUntil) // Alloc 5 should not be replaced because it is terminal assertResults(t, r, &resultExpectation{ @@ -1824,9 +1827,9 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { // Verify that the followup evalID field is set correctly for _, alloc := range r.attributeUpdates { if allocNameToIndex(alloc.Name) < 5 { - must.Eq(t, evals[0].ID, alloc.FollowupEvalID) + require.Equal(evals[0].ID, alloc.FollowupEvalID) } else if allocNameToIndex(alloc.Name) < 7 { - must.Eq(t, evals[1].ID, alloc.FollowupEvalID) + require.Equal(evals[1].ID, alloc.FollowupEvalID) } else { t.Fatalf("Unexpected alloc name in Inplace results %v", alloc.Name) } @@ -1837,6 +1840,7 @@ func TestReconciler_RescheduleLaterWithBatchedEvals_Batch(t *testing.T) { func TestReconciler_RescheduleNow_Batch(t *testing.T) { ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() job.TaskGroups[0].Count = 4 @@ -1891,7 +1895,7 @@ func TestReconciler_RescheduleNow_Batch(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Two reschedule attempts were made, one more can be made now // Alloc 5 should not be replaced because it is terminal @@ -1920,6 +1924,8 @@ func TestReconciler_RescheduleNow_Batch(t *testing.T) { func TestReconciler_RescheduleLater_Service(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -1967,9 +1973,9 @@ func TestReconciler_RescheduleLater_Service(t *testing.T) { // Should place a new placement and create a follow up eval for the delayed reschedule // Verify that the follow up eval has the expected waitUntil time evals := r.desiredFollowupEvals[tgName] - must.NotNil(t, evals) - must.SliceLen(t, 1, evals) - must.Eq(t, now.Add(delayDur), evals[0].WaitUntil) + require.NotNil(evals) + require.Equal(1, len(evals)) + require.Equal(now.Add(delayDur), evals[0].WaitUntil) assertResults(t, r, &resultExpectation{ createDeployment: nil, @@ -1996,7 +2002,7 @@ func TestReconciler_RescheduleLater_Service(t *testing.T) { for _, a := range r.attributeUpdates { annotated = a } - must.Eq(t, evals[0].ID, annotated.FollowupEvalID) + require.Equal(evals[0].ID, annotated.FollowupEvalID) } // Tests service allocations with client status complete @@ -2114,13 +2120,16 @@ func TestReconciler_Service_DesiredStop_ClientStatusComplete(t *testing.T) { assertNamesHaveIndexes(t, intRange(4, 4), placeResultsToNames(r.place)) // Should not have any follow up evals created - must.MapEmpty(t, r.desiredFollowupEvals) + require := require.New(t) + require.Equal(0, len(r.desiredFollowupEvals)) } // Tests rescheduling failed service allocations with desired state stop func TestReconciler_RescheduleNow_Service(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2174,7 +2183,7 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc and one replacement for terminal alloc were placed assertResults(t, r, &resultExpectation{ @@ -2202,6 +2211,8 @@ func TestReconciler_RescheduleNow_Service(t *testing.T) { func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { ci.Parallel(t) + require 
:= require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2254,7 +2265,7 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc was placed assertResults(t, r, &resultExpectation{ @@ -2282,6 +2293,8 @@ func TestReconciler_RescheduleNow_WithinAllowedTimeWindow(t *testing.T) { func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2336,7 +2349,7 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc was placed assertResults(t, r, &resultExpectation{ @@ -2364,6 +2377,8 @@ func TestReconciler_RescheduleNow_EvalIDMatch(t *testing.T) { func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2445,7 +2460,7 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc and one replacement for terminal alloc were placed assertResults(t, r, &resultExpectation{ @@ -2473,6 +2488,8 @@ func TestReconciler_RescheduleNow_Service_WithCanaries(t *testing.T) { func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2570,7 +2587,7 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc and one replacement for terminal alloc were placed assertResults(t, r, &resultExpectation{ @@ -2599,6 +2616,8 @@ func TestReconciler_RescheduleNow_Service_Canaries(t *testing.T) { func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -2698,7 +2717,7 @@ func TestReconciler_RescheduleNow_Service_Canaries_Limit(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc and one replacement for terminal alloc were placed assertResults(t, r, &resultExpectation{ @@ -5142,6 +5161,8 @@ func TestReconciler_SuccessfulDeploymentWithFailedAllocs_Reschedule(t *testing.T func TestReconciler_ForceReschedule_Service(t *testing.T) { ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -5188,7 +5209,7 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // Verify that one rescheduled alloc was created because of the forced reschedule assertResults(t, r, &resultExpectation{ @@ -5218,6 +5239,8 @@ func TestReconciler_ForceReschedule_Service(t *testing.T) { func TestReconciler_RescheduleNot_Service(t *testing.T) 
{ ci.Parallel(t) + require := require.New(t) + // Set desired 5 job := mock.Job() job.TaskGroups[0].Count = 5 @@ -5271,7 +5294,7 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // no rescheduling, ignore all 4 allocs // but place one to substitute allocs[4] that was stopped explicitly @@ -5295,40 +5318,21 @@ func TestReconciler_RescheduleNot_Service(t *testing.T) { assertPlacementsAreRescheduled(t, 0, r.place) } -type mockPicker struct { - called bool - strategy string - result string -} - -func (mp *mockPicker) PickReconnectingAlloc(disconnect *structs.DisconnectStrategy, - original *structs.Allocation, replacement *structs.Allocation) *structs.Allocation { - mp.strategy = disconnect.ReconcileStrategy() - mp.called = true - - if mp.result == "original" { - return original - - } - - return replacement -} - // Tests that when a node disconnects/reconnects allocations for that node are // reconciled according to the business rules. func TestReconciler_Disconnected_Client(t *testing.T) { - disconnectAllocState := []*structs.AllocState{ - { - Field: structs.AllocStateFieldClientStatus, - Value: structs.AllocClientStatusUnknown, - Time: time.Now(), - }, - } + disconnectAllocState := []*structs.AllocState{{ + Field: structs.AllocStateFieldClientStatus, + Value: structs.AllocClientStatusUnknown, + Time: time.Now(), + }} type testCase struct { name string allocCount int disconnectedAllocCount int + jobVersionIncrement uint64 + nodeScoreIncrement float64 disconnectedAllocStatus string disconnectedAllocStates []*structs.AllocState isBatch bool @@ -5341,18 +5345,16 @@ func TestReconciler_Disconnected_Client(t *testing.T) { shouldStopOnDisconnectedNode bool maxDisconnect *time.Duration expected *resultExpectation - pickResult string - reconcileStrategy string - callPicker bool } testCases := []testCase{ { - name: "reconnect-original-no-replacement", - allocCount: 2, - replace: false, - disconnectedAllocCount: 2, - disconnectedAllocStatus: structs.AllocClientStatusRunning, + name: "reconnect-original-no-replacement", + allocCount: 2, + replace: false, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: false, expected: &resultExpectation{ @@ -5363,14 +5365,14 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, }, - callPicker: false, }, { - name: "resume-original-and-stop-replacement", - allocCount: 3, - replace: true, - disconnectedAllocCount: 1, - disconnectedAllocStatus: structs.AllocClientStatusRunning, + name: "resume-original-and-stop-replacement", + allocCount: 3, + replace: true, + disconnectedAllocCount: 1, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: false, expected: &resultExpectation{ @@ -5383,17 +5385,34 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, }, - maxDisconnect: pointer.Of(5 * time.Minute), - callPicker: true, - reconcileStrategy: structs.ReconcileOptionKeepOriginal, - pickResult: "original", }, { - name: "stop-original-failed-on-reconnect", - allocCount: 4, - replace: true, - disconnectedAllocCount: 2, - disconnectedAllocStatus: structs.AllocClientStatusFailed, + name: "stop-original-with-lower-node-score", + allocCount: 4, + replace: true, + disconnectedAllocCount: 1, + disconnectedAllocStatus: 
structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + shouldStopOnDisconnectedNode: true, + nodeScoreIncrement: 1, + expected: &resultExpectation{ + stop: 1, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Stop: 1, + Ignore: 4, + }, + }, + }, + }, + { + name: "stop-original-failed-on-reconnect", + allocCount: 4, + replace: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusFailed, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: true, expected: &resultExpectation{ @@ -5407,11 +5426,12 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, { - name: "reschedule-original-failed-if-not-replaced", - allocCount: 4, - replace: false, - disconnectedAllocCount: 2, - disconnectedAllocStatus: structs.AllocClientStatusFailed, + name: "reschedule-original-failed-if-not-replaced", + allocCount: 4, + replace: false, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusFailed, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: true, expected: &resultExpectation{ @@ -5427,25 +5447,125 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, { - name: "update-reconnect-completed", + name: "ignore-reconnect-completed", allocCount: 2, replace: false, disconnectedAllocCount: 2, disconnectedAllocStatus: structs.AllocClientStatusComplete, + disconnectedAllocStates: disconnectAllocState, isBatch: true, expected: &resultExpectation{ - place: 0, + place: 2, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Ignore: 2, + Place: 2, + }, + }, + }, + }, + { + name: "keep-original-alloc-and-stop-failed-replacement", + allocCount: 3, + replace: true, + failReplacement: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + expected: &resultExpectation{ + reconnectUpdates: 2, + stop: 0, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Ignore: 5, + }, + }, + }, + }, + { + name: "keep-original-and-stop-reconnecting-replacement", + allocCount: 2, + replace: true, + disconnectReplacement: true, + disconnectedAllocCount: 1, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + expected: &resultExpectation{ + reconnectUpdates: 1, + stop: 1, desiredTGUpdates: map[string]*structs.DesiredUpdates{ "web": { Ignore: 2, - Place: 0, + Stop: 1, + }, + }, + }, + }, + { + name: "keep-original-and-stop-tainted-replacement", + allocCount: 3, + replace: true, + taintReplacement: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + expected: &resultExpectation{ + reconnectUpdates: 2, + stop: 2, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Ignore: 3, + Stop: 2, }, }, }, }, { - name: "stop-original-alloc-failed-replacements-replaced", + name: "stop-original-alloc-with-old-job-version", + allocCount: 5, + replace: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + shouldStopOnDisconnectedNode: true, + jobVersionIncrement: 1, + expected: &resultExpectation{ + stop: 2, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Ignore: 5, + Stop: 2, + }, + }, + }, + }, + { + name: "stop-original-alloc-with-old-job-version-reconnect-eval", + 
allocCount: 5, + replace: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusRunning, + + disconnectedAllocStates: disconnectAllocState, + shouldStopOnDisconnectedNode: true, + jobVersionIncrement: 1, + expected: &resultExpectation{ + stop: 2, + desiredTGUpdates: map[string]*structs.DesiredUpdates{ + "web": { + Stop: 2, + Ignore: 5, + }, + }, + }, + }, + { + name: "stop-original-alloc-with-old-job-version-and-failed-replacements-replaced", allocCount: 5, replace: true, failReplacement: true, @@ -5454,6 +5574,7 @@ func TestReconciler_Disconnected_Client(t *testing.T) { disconnectedAllocStatus: structs.AllocClientStatusRunning, disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: false, + jobVersionIncrement: 1, expected: &resultExpectation{ stop: 2, reconnectUpdates: 2, @@ -5466,11 +5587,12 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, { - name: "stop-original-pending-alloc-for-disconnected-node", - allocCount: 2, - replace: true, - disconnectedAllocCount: 1, - disconnectedAllocStatus: structs.AllocClientStatusPending, + name: "stop-original-pending-alloc-for-disconnected-node", + allocCount: 2, + replace: true, + disconnectedAllocCount: 1, + disconnectedAllocStatus: structs.AllocClientStatusPending, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: true, nodeStatusDisconnected: true, @@ -5485,22 +5607,23 @@ func TestReconciler_Disconnected_Client(t *testing.T) { }, }, { - name: "stop-failed-original-and-failed-replacements-and-place-new", - allocCount: 5, - replace: true, - failReplacement: true, - disconnectedAllocCount: 2, - disconnectedAllocStatus: structs.AllocClientStatusFailed, + name: "stop-failed-original-and-failed-replacements-and-place-new", + allocCount: 5, + replace: true, + failReplacement: true, + disconnectedAllocCount: 2, + disconnectedAllocStatus: structs.AllocClientStatusFailed, + disconnectedAllocStates: disconnectAllocState, shouldStopOnDisconnectedNode: true, expected: &resultExpectation{ - stop: 4, + stop: 2, place: 2, desiredTGUpdates: map[string]*structs.DesiredUpdates{ "web": { - Stop: 4, + Stop: 2, Place: 2, - Ignore: 3, + Ignore: 5, }, }, }, @@ -5573,10 +5696,6 @@ func TestReconciler_Disconnected_Client(t *testing.T) { if tc.maxDisconnect != nil { alloc.Job.TaskGroups[0].MaxClientDisconnect = tc.maxDisconnect - alloc.Job.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: *tc.maxDisconnect, - Reconcile: tc.reconcileStrategy, - } } if disconnectedAllocCount > 0 { @@ -5606,6 +5725,12 @@ func TestReconciler_Disconnected_Client(t *testing.T) { replacement.CreateIndex += 1 alloc.NextAllocation = replacement.ID + if tc.jobVersionIncrement != 0 { + replacement.Job.Version = replacement.Job.Version + tc.jobVersionIncrement + } + if tc.nodeScoreIncrement != 0 { + replacement.Metrics.ScoreMetaData[0].NormScore = replacement.Metrics.ScoreMetaData[0].NormScore + tc.nodeScoreIncrement + } if tc.taintReplacement { replacement.DesiredTransition.Migrate = pointer.Of(true) } @@ -5623,6 +5748,8 @@ func TestReconciler_Disconnected_Client(t *testing.T) { nextReplacement.ClientStatus = structs.AllocClientStatusRunning nextReplacement.DesiredStatus = structs.AllocDesiredStatusRun nextReplacement.PreviousAllocation = replacement.ID + nextReplacement.CreateIndex += 1 + replacement.NextAllocation = nextReplacement.ID replacement.DesiredStatus = structs.AllocDesiredStatusStop @@ -5644,18 +5771,9 @@ func TestReconciler_Disconnected_Client(t *testing.T) { 
reconciler.now = time.Now().Add(*tc.maxDisconnect * 20) } - mpc := &mockPicker{ - result: tc.pickResult, - } - - reconciler.reconnectingPicker = mpc - results := reconciler.Compute() assertResults(t, results, tc.expected) - must.Eq(t, tc.reconcileStrategy, mpc.strategy) - must.Eq(t, tc.callPicker, mpc.called) - for _, stopResult := range results.stop { // Skip replacement allocs. if !origAllocs.Contains(stopResult.alloc.ID) { @@ -5665,10 +5783,10 @@ func TestReconciler_Disconnected_Client(t *testing.T) { if tc.shouldStopOnDisconnectedNode { must.Eq(t, testNode.ID, stopResult.alloc.NodeID) } else { - must.NotEq(t, testNode.ID, stopResult.alloc.NodeID) + require.NotEqual(t, testNode.ID, stopResult.alloc.NodeID) } - must.Eq(t, job.Version, stopResult.alloc.Job.Version) + require.Equal(t, job.Version, stopResult.alloc.Job.Version) } }) } @@ -5679,6 +5797,7 @@ func TestReconciler_Disconnected_Client(t *testing.T) { func TestReconciler_RescheduleNot_Batch(t *testing.T) { ci.Parallel(t) + require := require.New(t) // Set desired 4 job := mock.Job() job.TaskGroups[0].Count = 4 @@ -5738,7 +5857,7 @@ func TestReconciler_RescheduleNot_Batch(t *testing.T) { // Verify that no follow up evals were created evals := r.desiredFollowupEvals[tgName] - must.Nil(t, evals) + require.Nil(evals) // No reschedule attempts were made and all allocs are untouched assertResults(t, r, &resultExpectation{ @@ -5770,19 +5889,19 @@ func TestReconciler_Node_Disconnect_Updates_Alloc_To_Unknown(t *testing.T) { // Verify that 1 follow up eval was created with the values we expect. evals := results.desiredFollowupEvals[job.TaskGroups[0].Name] - must.SliceLen(t, 1, evals) + require.Len(t, evals, 1) expectedTime := reconciler.now.Add(5 * time.Minute) eval := evals[0] - must.NotNil(t, eval.WaitUntil) - must.Eq(t, expectedTime, eval.WaitUntil) + require.NotNil(t, eval.WaitUntil) + require.Equal(t, expectedTime, eval.WaitUntil) // Validate that the queued disconnectUpdates have the right client status, // and that they have a valid FollowUpdEvalID. 
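// A rough sketch of the follow-up-eval contract the surrounding assertions
// check: each disconnecting alloc is flipped to client status "unknown" and
// tied to a follow-up eval whose WaitUntil is now plus the disconnect window.
// Hypothetical local types, not the real eval/alloc structs.
package main

import (
	"fmt"
	"time"
)

type followupEval struct {
	ID        string
	WaitUntil time.Time
}

type disconnectUpdate struct {
	ClientStatus   string
	FollowupEvalID string
}

// markDisconnecting mirrors the expectation: one batched follow-up eval that
// fires when the window (e.g. 5 minutes) expires, referenced by every update.
func markDisconnecting(now time.Time, window time.Duration, allocIDs []string) (followupEval, map[string]disconnectUpdate) {
	eval := followupEval{ID: "eval-1", WaitUntil: now.Add(window)}
	updates := make(map[string]disconnectUpdate, len(allocIDs))
	for _, id := range allocIDs {
		updates[id] = disconnectUpdate{ClientStatus: "unknown", FollowupEvalID: eval.ID}
	}
	return eval, updates
}

func main() {
	now := time.Now()
	eval, updates := markDisconnecting(now, 5*time.Minute, []string{"a1", "a2"})
	fmt.Println(eval.WaitUntil.Sub(now), len(updates)) // 5m0s 2
}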
for _, disconnectUpdate := range results.disconnectUpdates { - must.Eq(t, structs.AllocClientStatusUnknown, disconnectUpdate.ClientStatus) - must.NotEq(t, "", disconnectUpdate.FollowupEvalID) - must.Eq(t, eval.ID, disconnectUpdate.FollowupEvalID) + require.Equal(t, structs.AllocClientStatusUnknown, disconnectUpdate.ClientStatus) + require.NotEmpty(t, disconnectUpdate.FollowupEvalID) + require.Equal(t, eval.ID, disconnectUpdate.FollowupEvalID) } // 2 to place, 2 to update, 1 to ignore @@ -6122,7 +6241,7 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { } } - must.Eq(t, tc.deploymentState.DesiredTotal, allocsConfigured, must.Sprintf("invalid alloc configuration: expect %d got %d", tc.deploymentState.DesiredTotal, allocsConfigured)) + require.Equal(t, tc.deploymentState.DesiredTotal, allocsConfigured, "invalid alloc configuration: expect %d got %d", tc.deploymentState.DesiredTotal, allocsConfigured) // Populate Alloc IDS, Node IDs, Job on canaries canariesConfigured := 0 @@ -6154,7 +6273,7 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { } // Validate tc.canaryAllocs against tc.deploymentState - must.Eq(t, tc.deploymentState.PlacedAllocs, canariesConfigured, must.Sprintf("invalid canary configuration: expect %d got %d", tc.deploymentState.PlacedAllocs, canariesConfigured)) + require.Equal(t, tc.deploymentState.PlacedAllocs, canariesConfigured, "invalid canary configuration: expect %d got %d", tc.deploymentState.PlacedAllocs, canariesConfigured) deployment := structs.NewDeployment(updatedJob, 50) deployment.TaskGroups[updatedJob.TaskGroups[0].Name] = tc.deploymentState @@ -6183,33 +6302,33 @@ func TestReconciler_Client_Disconnect_Canaries(t *testing.T) { // and that they have a disconnect update. for _, placeResult := range result.place { found := false - must.NotNil(t, placeResult.previousAlloc) + require.NotNil(t, placeResult.previousAlloc) for _, deployed := range tc.deployedAllocs[disconnectedNode] { if deployed.ID == placeResult.previousAlloc.ID { found = true - must.Eq(t, job.Version, placeResult.previousAlloc.Job.Version) - must.Eq(t, disconnectedNode.ID, placeResult.previousAlloc.NodeID) + require.Equal(t, job.Version, placeResult.previousAlloc.Job.Version) + require.Equal(t, disconnectedNode.ID, placeResult.previousAlloc.NodeID) _, exists := result.disconnectUpdates[placeResult.previousAlloc.ID] - must.True(t, exists) + require.True(t, exists) break } } for _, canary := range tc.canaryAllocs[disconnectedNode] { if canary.ID == placeResult.previousAlloc.ID { found = true - must.Eq(t, updatedJob.Version, placeResult.previousAlloc.Job.Version) - must.Eq(t, disconnectedNode.ID, placeResult.previousAlloc.NodeID) + require.Equal(t, updatedJob.Version, placeResult.previousAlloc.Job.Version) + require.Equal(t, disconnectedNode.ID, placeResult.previousAlloc.NodeID) _, exists := result.disconnectUpdates[placeResult.previousAlloc.ID] - must.True(t, exists) + require.True(t, exists) break } } - must.True(t, found) + require.True(t, found) } // Validate that stops are for pending disconnects for _, stopResult := range result.stop { - must.Eq(t, pending, stopResult.alloc.ClientStatus) + require.Equal(t, pending, stopResult.alloc.ClientStatus) } }) } @@ -6296,7 +6415,7 @@ func TestReconciler_ComputeDeploymentPaused(t *testing.T) { job = mock.BatchJob() } - must.NotNil(t, job, must.Sprint("invalid job type", tc.jobType)) + require.NotNil(t, job, "invalid job type", tc.jobType) var deployment *structs.Deployment if tc.isMultiregion { @@ -6326,7 +6445,7 @@ func 
TestReconciler_ComputeDeploymentPaused(t *testing.T) { _ = reconciler.Compute() - must.Eq(t, tc.expected, reconciler.deploymentPaused) + require.Equal(t, tc.expected, reconciler.deploymentPaused) }) } } diff --git a/scheduler/reconcile_util.go b/scheduler/reconcile_util.go index 25c7f5431520..18d8af5c4e5b 100644 --- a/scheduler/reconcile_util.go +++ b/scheduler/reconcile_util.go @@ -277,7 +277,7 @@ func (a allocSet) filterByTainted(taintedNodes map[string]*structs.Node, serverS } } else { - if alloc.PreventRescheduleOnDisconnect() { + if alloc.PreventRescheduleOnLost() { if alloc.ClientStatus == structs.AllocClientStatusRunning { disconnecting[alloc.ID] = alloc continue @@ -293,9 +293,9 @@ func (a allocSet) filterByTainted(taintedNodes map[string]*structs.Node, serverS } if alloc.TerminalStatus() && !reconnect { - // Server-terminal allocs, if supportsDisconnectedClient and not reconnect, + // Terminal allocs, if supportsDisconnectedClient and not reconnect, // are probably stopped replacements and should be ignored - if supportsDisconnectedClients && alloc.ServerTerminalStatus() { + if supportsDisconnectedClients { ignore[alloc.ID] = alloc continue } @@ -364,7 +364,7 @@ func (a allocSet) filterByTainted(taintedNodes map[string]*structs.Node, serverS // Allocs on terminal nodes that can't be rescheduled need to be treated // differently than those that can. if taintedNode.TerminalStatus() { - if alloc.PreventRescheduleOnDisconnect() { + if alloc.PreventRescheduleOnLost() { if alloc.ClientStatus == structs.AllocClientStatusUnknown { untainted[alloc.ID] = alloc continue @@ -505,7 +505,7 @@ func updateByReschedulable(alloc *structs.Allocation, now time.Time, evalID stri var eligible bool switch { case isDisconnecting: - rescheduleTime, eligible = alloc.RescheduleTimeOnDisconnect(now) + rescheduleTime, eligible = alloc.NextRescheduleTimeByTime(now) case alloc.ClientStatus == structs.AllocClientStatusUnknown && alloc.FollowupEvalID == evalID: lastDisconnectTime := alloc.LastUnknown() @@ -554,7 +554,7 @@ func (a allocSet) filterByDeployment(id string) (match, nonmatch allocSet) { } // delayByStopAfterClientDisconnect returns a delay for any lost allocation that's got a -// disconnect.stop_on_client_after configured +// stop_after_client_disconnect configured func (a allocSet) delayByStopAfterClientDisconnect() (later []*delayedRescheduleInfo) { now := time.Now().UTC() for _, a := range a { diff --git a/scheduler/reconcile_util_test.go b/scheduler/reconcile_util_test.go index f5c64e5be3cf..268c963d52c9 100644 --- a/scheduler/reconcile_util_test.go +++ b/scheduler/reconcile_util_test.go @@ -14,76 +14,9 @@ import ( "github.com/shoenig/test/must" ) -func testJob_Deprecated() *structs.Job { - testJob := mock.Job() - testJob.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) - - return testJob -} - -func testJobSingle_Deprecated() *structs.Job { - testJobSingle := mock.Job() - testJobSingle.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) - testJobSingle.TaskGroups[0].PreventRescheduleOnLost = true - - return testJobSingle -} - -func testJobNoMaxDisconnect_Deprecated() *structs.Job { - testJobNoMaxDisconnect := mock.Job() - testJobNoMaxDisconnect.TaskGroups[0].MaxClientDisconnect = nil - - return testJobNoMaxDisconnect -} - -func testJobNoMaxDisconnectSingle_Deprecated() *structs.Job { - testJobNoMaxDisconnectSingle := mock.Job() - testJobNoMaxDisconnectSingle.TaskGroups[0].MaxClientDisconnect = nil - testJobNoMaxDisconnectSingle.TaskGroups[0].PreventRescheduleOnLost = true 
- - return testJobNoMaxDisconnectSingle -} - -func testJob_Disconnected() *structs.Job { - testJob := mock.Job() - testJob.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: 5 * time.Second, - Replace: pointer.Of(true), - } - - return testJob -} - -func testJobSingle_Disconnected() *structs.Job { - testJobSingle := mock.Job() - testJobSingle.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: 5 * time.Second, - Replace: pointer.Of(true), - } - return testJobSingle -} - -func testJobNoMaxDisconnect_Disconnected() *structs.Job { - testJobNoMaxDisconnect := mock.Job() - testJobNoMaxDisconnect.TaskGroups[0].Disconnect = nil - - return testJobNoMaxDisconnect -} - -func testJobNoMaxDisconnectSingle_Disconnected() *structs.Job { - testJobNoMaxDisconnectSingle := mock.Job() - testJobNoMaxDisconnectSingle.TaskGroups[0].Disconnect = &structs.DisconnectStrategy{ - LostAfter: 0 * time.Second, - Replace: pointer.Of(false), - } - - return testJobNoMaxDisconnectSingle -} - func TestAllocSet_filterByTainted(t *testing.T) { ci.Parallel(t) - now := time.Now() nodes := map[string]*structs.Node{ "draining": { ID: "draining", @@ -104,6 +37,21 @@ func TestAllocSet_filterByTainted(t *testing.T) { }, } + testJob := mock.Job() + testJob.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) + now := time.Now() + + testJobSingle := mock.Job() + testJobSingle.TaskGroups[0].MaxClientDisconnect = pointer.Of(5 * time.Second) + testJobSingle.TaskGroups[0].PreventRescheduleOnLost = true + + testJobNoMaxDisconnect := mock.Job() + testJobNoMaxDisconnect.TaskGroups[0].MaxClientDisconnect = nil + + testJobNoMaxDisconnectSingle := mock.Job() + testJobNoMaxDisconnectSingle.TaskGroups[0].MaxClientDisconnect = nil + testJobNoMaxDisconnectSingle.TaskGroups[0].PreventRescheduleOnLost = true + unknownAllocState := []*structs.AllocState{{ Field: structs.AllocStateFieldClientStatus, Value: structs.AllocClientStatusUnknown, @@ -129,1286 +77,1254 @@ func TestAllocSet_filterByTainted(t *testing.T) { }, } - jobDefinitions := []struct { - name string - testJob func() *structs.Job - testJobSingle func() *structs.Job - testJobNoMaxDisconnect func() *structs.Job - testJobNoMaxDisconnectSingle func() *structs.Job - }{ - // Test using max_client_disconnect, remove after its deprecated in favor - // of Disconnect.LostAfter introduced in 1.8.0. + type testCase struct { + name string + all allocSet + taintedNodes map[string]*structs.Node + supportsDisconnectedClients bool + skipNilNodeTest bool + now time.Time + PreventRescheduleOnLost bool + // expected results + untainted allocSet + migrate allocSet + lost allocSet + disconnecting allocSet + reconnecting allocSet + ignore allocSet + expiring allocSet + } + + testCases := []testCase{ + // These two cases test that we maintain parity with pre-disconnected-clients behavior. 
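// The testCase fields below name the buckets that allocSet.filterByTainted
// splits an allocation set into. This stand-in struct is only a sketch of
// that contract, with comments summarizing how the cases that follow use
// each bucket; the real function returns allocSet values, not ID slices.
type allocBuckets struct {
	untainted     []string // healthy allocs to leave running as-is
	migrate       []string // allocs marked to drain off a draining node
	lost          []string // allocs on gone nodes with no disconnect window
	disconnecting []string // running allocs on a node that just disconnected
	reconnecting  []string // allocs whose node returned and may resume
	ignore        []string // terminal or already-handled allocs to skip
	expiring      []string // unknown allocs whose disconnect window lapsed
}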
+ { + name: "lost-client", + supportsDisconnectedClients: false, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "untainted1": { + ID: "untainted1", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJob, + NodeID: "normal", + }, + // Terminal allocs are always untainted + "untainted2": { + ID: "untainted2", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "normal", + }, + // Terminal allocs are always untainted, even on draining nodes + "untainted3": { + ID: "untainted3", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "draining", + }, + // Terminal allocs are always untainted, even on lost nodes + "untainted4": { + ID: "untainted4", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "lost", + }, + // Non-terminal alloc with migrate=true should migrate on a draining node + "migrating1": { + ID: "migrating1", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJob, + NodeID: "draining", + }, + // Non-terminal alloc with migrate=true should migrate on an unknown node + "migrating2": { + ID: "migrating2", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJob, + NodeID: "nil", + }, + }, + untainted: allocSet{ + "untainted1": { + ID: "untainted1", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJob, + NodeID: "normal", + }, + // Terminal allocs are always untainted + "untainted2": { + ID: "untainted2", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "normal", + }, + // Terminal allocs are always untainted, even on draining nodes + "untainted3": { + ID: "untainted3", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "draining", + }, + // Terminal allocs are always untainted, even on lost nodes + "untainted4": { + ID: "untainted4", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJob, + NodeID: "lost", + }, + }, + migrate: allocSet{ + // Non-terminal alloc with migrate=true should migrate on a draining node + "migrating1": { + ID: "migrating1", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJob, + NodeID: "draining", + }, + // Non-terminal alloc with migrate=true should migrate on an unknown node + "migrating2": { + ID: "migrating2", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJob, + NodeID: "nil", + }, + }, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, { - name: "old_definitions_deprecated", - testJob: testJob_Deprecated, - testJobSingle: testJobSingle_Deprecated, - testJobNoMaxDisconnect: testJobNoMaxDisconnect_Deprecated, - testJobNoMaxDisconnectSingle: testJobNoMaxDisconnectSingle_Deprecated, + name: "lost-client-only-tainted-nodes", + supportsDisconnectedClients: false, + now: time.Now(), + taintedNodes: nodes, + // The logic associated with this test case can only trigger if there + // is a tainted node. Therefore, testing with a nil node set produces + // false failures, so don't perform that test if in this case. 
+ skipNilNodeTest: true, + all: allocSet{ + // Non-terminal allocs on lost nodes are lost + "lost1": { + ID: "lost1", + ClientStatus: structs.AllocClientStatusPending, + Job: testJob, + NodeID: "lost", + }, + // Non-terminal allocs on lost nodes are lost + "lost2": { + ID: "lost2", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJob, + NodeID: "lost", + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{ + // Non-terminal allocs on lost nodes are lost + "lost1": { + ID: "lost1", + ClientStatus: structs.AllocClientStatusPending, + Job: testJob, + NodeID: "lost", + }, + // Non-terminal allocs on lost nodes are lost + "lost2": { + ID: "lost2", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJob, + NodeID: "lost", + }, + }, + expiring: allocSet{}, }, { - name: "new_definitions_using_disconnect_block", - testJob: testJob_Deprecated, - testJobSingle: testJobSingle_Deprecated, - testJobNoMaxDisconnect: testJobNoMaxDisconnect_Deprecated, - testJobNoMaxDisconnectSingle: testJobNoMaxDisconnectSingle_Deprecated, + name: "disco-client-disconnect-unset-max-disconnect", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: true, + all: allocSet{ + // Non-terminal allocs on disconnected nodes w/o max-disconnect are lost + "lost-running": { + ID: "lost-running", + Name: "lost-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnect, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{ + "lost-running": { + ID: "lost-running", + Name: "lost-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnect, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + expiring: allocSet{}, }, - } - - for _, jd := range jobDefinitions { - testJob := jd.testJob() - testJobSingle := jd.testJobSingle() - testJobNoMaxDisconnect := jd.testJobNoMaxDisconnect() - testJobNoMaxDisconnectSingle := jd.testJobNoMaxDisconnectSingle() - - t.Run(jd.name, func(t *testing.T) { - testCases := []struct { - name string - all allocSet - taintedNodes map[string]*structs.Node - supportsDisconnectedClients bool - skipNilNodeTest bool - now time.Time - PreventRescheduleOnLost bool - // expected results - untainted allocSet - migrate allocSet - lost allocSet - disconnecting allocSet - reconnecting allocSet - ignore allocSet - expiring allocSet - }{ // These two cases test that we maintain parity with pre-disconnected-clients behavior. 
- { - name: "lost-client", - supportsDisconnectedClients: false, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "untainted1": { - ID: "untainted1", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJob, - NodeID: "normal", - }, - // Terminal allocs are always untainted - "untainted2": { - ID: "untainted2", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "normal", - }, - // Terminal allocs are always untainted, even on draining nodes - "untainted3": { - ID: "untainted3", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "draining", - }, - // Terminal allocs are always untainted, even on lost nodes - "untainted4": { - ID: "untainted4", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "lost", - }, - // Non-terminal alloc with migrate=true should migrate on a draining node - "migrating1": { - ID: "migrating1", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJob, - NodeID: "draining", - }, - // Non-terminal alloc with migrate=true should migrate on an unknown node - "migrating2": { - ID: "migrating2", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJob, - NodeID: "nil", - }, - }, - untainted: allocSet{ - "untainted1": { - ID: "untainted1", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJob, - NodeID: "normal", - }, - // Terminal allocs are always untainted - "untainted2": { - ID: "untainted2", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "normal", - }, - // Terminal allocs are always untainted, even on draining nodes - "untainted3": { - ID: "untainted3", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "draining", - }, - // Terminal allocs are always untainted, even on lost nodes - "untainted4": { - ID: "untainted4", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJob, - NodeID: "lost", - }, - }, - migrate: allocSet{ - // Non-terminal alloc with migrate=true should migrate on a draining node - "migrating1": { - ID: "migrating1", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJob, - NodeID: "draining", - }, - // Non-terminal alloc with migrate=true should migrate on an unknown node - "migrating2": { - ID: "migrating2", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJob, - NodeID: "nil", - }, - }, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "lost-client-only-tainted-nodes", - supportsDisconnectedClients: false, - now: time.Now(), - taintedNodes: nodes, - // The logic associated with this test case can only trigger if there - // is a tainted node. Therefore, testing with a nil node set produces - // false failures, so don't perform that test if in this case. 
- skipNilNodeTest: true, - all: allocSet{ - // Non-terminal allocs on lost nodes are lost - "lost1": { - ID: "lost1", - ClientStatus: structs.AllocClientStatusPending, - Job: testJob, - NodeID: "lost", - }, - // Non-terminal allocs on lost nodes are lost - "lost2": { - ID: "lost2", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJob, - NodeID: "lost", - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{ - // Non-terminal allocs on lost nodes are lost - "lost1": { - ID: "lost1", - ClientStatus: structs.AllocClientStatusPending, - Job: testJob, - NodeID: "lost", - }, - // Non-terminal allocs on lost nodes are lost - "lost2": { - ID: "lost2", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJob, - NodeID: "lost", - }, - }, - expiring: allocSet{}, - }, - { - name: "disco-client-disconnect-unset-max-disconnect", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: true, - all: allocSet{ - // Non-terminal allocs on disconnected nodes w/o max-disconnect are lost - "lost-running": { - ID: "lost-running", - Name: "lost-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnect, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{ - "lost-running": { - ID: "lost-running", - Name: "lost-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnect, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - expiring: allocSet{}, - }, - // Everything below this line tests the disconnected client mode. 
- { - name: "disco-client-untainted-reconnect-failed-and-replaced", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "failed-original", - }, - // Failed and replaced allocs on reconnected nodes - // that are still desired-running are reconnected so - // we can stop them - "failed-original": { - ID: "failed-original", - Name: "web", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "failed-original", - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "failed-original": { - ID: "failed-original", - Name: "web", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "disco-client-reconnecting-running-no-replacement", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - // Running allocs on reconnected nodes with no replacement are reconnecting. - // Node.UpdateStatus has already handled syncing client state so this - // should be a noop. - "reconnecting-running-no-replacement": { - ID: "reconnecting-running-no-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "reconnecting-running-no-replacement": { - ID: "reconnecting-running-no-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "disco-client-terminal", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - // Allocs on reconnected nodes that are complete need to be updated to stop - "untainted-reconnect-complete": { - ID: "untainted-reconnect-complete", - Name: "untainted-reconnect-complete", - ClientStatus: structs.AllocClientStatusComplete, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - // Failed allocs on reconnected nodes are in reconnecting so that - // they be marked with desired status stop at the server. 
- "reconnecting-failed": { - ID: "reconnecting-failed", - Name: "reconnecting-failed", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - // Lost allocs on reconnected nodes don't get restarted - "ignored-reconnect-lost": { - ID: "ignored-reconnect-lost", - Name: "ignored-reconnect-lost", - ClientStatus: structs.AllocClientStatusLost, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - // Replacement allocs that are complete need to be updated - "untainted-reconnect-complete-replacement": { - ID: "untainted-reconnect-complete-replacement", - Name: "untainted-reconnect-complete", - ClientStatus: structs.AllocClientStatusComplete, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - PreviousAllocation: "untainted-reconnect-complete", - }, - // Replacement allocs on reconnected nodes that are failed are ignored - "ignored-reconnect-failed-replacement": { - ID: "ignored-reconnect-failed-replacement", - Name: "ignored-reconnect-failed", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "reconnecting-failed", - }, - // Lost replacement allocs on reconnected nodes don't get restarted - "ignored-reconnect-lost-replacement": { - ID: "ignored-reconnect-lost-replacement", - Name: "ignored-reconnect-lost", - ClientStatus: structs.AllocClientStatusLost, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - PreviousAllocation: "untainted-reconnect-lost", - }, - }, - untainted: allocSet{ - "untainted-reconnect-complete": { - ID: "untainted-reconnect-complete", - Name: "untainted-reconnect-complete", - ClientStatus: structs.AllocClientStatusComplete, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - "untainted-reconnect-complete-replacement": { - ID: "untainted-reconnect-complete-replacement", - Name: "untainted-reconnect-complete", - ClientStatus: structs.AllocClientStatusComplete, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - PreviousAllocation: "untainted-reconnect-complete", - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "reconnecting-failed": { - ID: "reconnecting-failed", - Name: "reconnecting-failed", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{ - "ignored-reconnect-lost": { - ID: "ignored-reconnect-lost", - Name: "ignored-reconnect-lost", - ClientStatus: structs.AllocClientStatusLost, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - "ignored-reconnect-failed-replacement": { - ID: "ignored-reconnect-failed-replacement", - Name: "ignored-reconnect-failed", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: 
"normal", - TaskGroup: "web", - PreviousAllocation: "reconnecting-failed", - }, - "ignored-reconnect-lost-replacement": { - ID: "ignored-reconnect-lost-replacement", - Name: "ignored-reconnect-lost", - ClientStatus: structs.AllocClientStatusLost, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - PreviousAllocation: "untainted-reconnect-lost", - }, - }, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "disco-client-disconnect", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: true, - all: allocSet{ - // Non-terminal allocs on disconnected nodes are disconnecting - "disconnect-running": { - ID: "disconnect-running", - Name: "disconnect-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - }, - // Unknown allocs on disconnected nodes are acknowledge, so they wont be rescheduled again - "untainted-unknown": { - ID: "untainted-unknown", - Name: "untainted-unknown", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - // Unknown allocs on disconnected nodes are lost when expired - "expiring-unknown": { - ID: "expiring-unknown", - Name: "expiring-unknown", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - // Pending allocs on disconnected nodes are lost - "lost-pending": { - ID: "lost-pending", - Name: "lost-pending", - ClientStatus: structs.AllocClientStatusPending, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - }, - // Expired allocs on reconnected clients are lost - "expiring-expired": { - ID: "expiring-expired", - Name: "expiring-expired", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - // Failed and stopped allocs on disconnected nodes are ignored - "ignore-reconnected-failed-stopped": { - ID: "ignore-reconnected-failed-stopped", - Name: "ignore-reconnected-failed-stopped", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - // Unknown allocs on disconnected nodes are acknowledge, so they wont be rescheduled again - "untainted-unknown": { - ID: "untainted-unknown", - Name: "untainted-unknown", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{ - "disconnect-running": { - ID: "disconnect-running", - Name: "disconnect-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - reconnecting: allocSet{}, - ignore: allocSet{ - "ignore-reconnected-failed-stopped": { - ID: "ignore-reconnected-failed-stopped", - Name: 
"ignore-reconnected-failed-stopped", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusStop, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - lost: allocSet{ - "lost-pending": { - ID: "lost-pending", - Name: "lost-pending", - ClientStatus: structs.AllocClientStatusPending, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - expiring: allocSet{ - "expiring-unknown": { - ID: "expiring-unknown", - Name: "expiring-unknown", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - "expiring-expired": { - ID: "expiring-expired", - Name: "expiring-expired", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - }, + // Everything below this line tests the disconnected client mode. + { + name: "disco-client-untainted-reconnect-failed-and-replaced", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "failed-original", }, - { - name: "disco-client-reconnect", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - // Expired allocs on reconnected clients are lost - "expired-reconnect": { - ID: "expired-reconnect", - Name: "expired-reconnect", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{ - "expired-reconnect": { - ID: "expired-reconnect", - Name: "expired-reconnect", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - }, + // Failed and replaced allocs on reconnected nodes + // that are still desired-running are reconnected so + // we can stop them + "failed-original": { + ID: "failed-original", + Name: "web", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, }, - { - name: "disco-client-running-reconnecting-and-replacement-untainted", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "running-original", - }, - // Running and replaced allocs on reconnected nodes are reconnecting - "running-original": { - ID: "running-original", - Name: "web", - ClientStatus: 
structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "running-original", - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "running-original": { - ID: "running-original", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - // After an alloc is reconnected, it should be considered - // "untainted" instead of "reconnecting" to allow changes such as - // job updates to be applied properly. - name: "disco-client-reconnected-alloc-untainted", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-reconnected": { - ID: "running-reconnected", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: reconnectedAllocState, - }, - }, - untainted: allocSet{ - "running-reconnected": { - ID: "running-reconnected", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJob, - NodeID: "normal", - TaskGroup: "web", - AllocStates: reconnectedAllocState, - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - // Everything below this line tests the single instance on lost mode. 
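For orientation while reading these table-driven cases: each entry feeds its all set through allocSet.filterByTainted and asserts the resulting buckets (untainted, migrate, disconnecting, reconnecting, ignore, lost, expiring). A minimal, self-contained Go sketch of that partitioning shape follows; it is not Nomad's actual implementation, and the simplified alloc type, the partition function, and the status strings are assumptions for illustration only.

package main

import "fmt"

// Simplified stand-ins for the scheduler types exercised in the table above.
type alloc struct {
	ID           string
	NodeID       string
	ClientStatus string // "running", "unknown", "complete", ...
	Expired      bool   // stands in for the AllocStates disconnect-timeout check
}

type allocSet map[string]*alloc

// partition mimics the shape of filterByTainted: every alloc lands in
// exactly one bucket, which is what the must.Eq assertions verify.
func partition(all allocSet, disconnected map[string]bool) (untainted, disconnecting, expiring allocSet) {
	untainted, disconnecting, expiring = allocSet{}, allocSet{}, allocSet{}
	for id, a := range all {
		switch {
		case a.Expired:
			expiring[id] = a // unknown allocs past their disconnect window
		case disconnected[a.NodeID] && a.ClientStatus == "running":
			disconnecting[id] = a // still running on a node that dropped off
		default:
			untainted[id] = a
		}
	}
	return untainted, disconnecting, expiring
}

func main() {
	all := allocSet{
		"web-1": {ID: "web-1", NodeID: "disconnected", ClientStatus: "running"},
		"web-2": {ID: "web-2", NodeID: "normal", ClientStatus: "unknown", Expired: true},
		"web-3": {ID: "web-3", NodeID: "normal", ClientStatus: "running"},
	}
	u, d, e := partition(all, map[string]bool{"disconnected": true})
	fmt.Println(len(u), len(d), len(e)) // prints: 1 1 1
}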
- { - name: "lost-client-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "untainted1": { - ID: "untainted1", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJobSingle, - NodeID: "normal", - }, - // Terminal allocs are always untainted - "untainted2": { - ID: "untainted2", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "normal", - }, - // Terminal allocs are always untainted, even on draining nodes - "untainted3": { - ID: "untainted3", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "draining", - }, - // Terminal allocs are always untainted, even on lost nodes - "untainted4": { - ID: "untainted4", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "lost", - }, - // Non-terminal alloc with migrate=true should migrate on a draining node - "migrating1": { - ID: "migrating1", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJobSingle, - NodeID: "draining", - }, - // Non-terminal alloc with migrate=true should migrate on an unknown node - "migrating2": { - ID: "migrating2", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJobSingle, - NodeID: "nil", - }, - }, - untainted: allocSet{ - "untainted1": { - ID: "untainted1", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJobSingle, - NodeID: "normal", - }, - // Terminal allocs are always untainted - "untainted2": { - ID: "untainted2", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "normal", - }, - // Terminal allocs are always untainted, even on draining nodes - "untainted3": { - ID: "untainted3", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "draining", - }, - // Terminal allocs are always untainted, even on lost nodes - "untainted4": { - ID: "untainted4", - ClientStatus: structs.AllocClientStatusComplete, - Job: testJobSingle, - NodeID: "lost", - }, - }, - migrate: allocSet{ - // Non-terminal alloc with migrate=true should migrate on a draining node - "migrating1": { - ID: "migrating1", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJobSingle, - NodeID: "draining", - }, - // Non-terminal alloc with migrate=true should migrate on an unknown node - "migrating2": { - ID: "migrating2", - ClientStatus: structs.AllocClientStatusRunning, - DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, - Job: testJobSingle, - NodeID: "nil", - }, - }, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "lost-client-only-tainted-nodes-single-instance-on", - supportsDisconnectedClients: false, - now: time.Now(), - taintedNodes: nodes, - // The logic associated with this test case can only trigger if there - // is a tainted node. Therefore, testing with a nil node set produces - // false failures, so don't perform that test if in this case. 
- skipNilNodeTest: true, - all: allocSet{ - // Non-terminal allocs on lost nodes are lost - "lost1": { - ID: "lost1", - ClientStatus: structs.AllocClientStatusPending, - Job: testJobSingle, - NodeID: "lost", - }, - // Non-terminal allocs on lost nodes are lost - "lost2": { - ID: "lost2", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJobSingle, - NodeID: "lost", - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{ - // Non-terminal allocs on lost nodes are lost - "lost1": { - ID: "lost1", - ClientStatus: structs.AllocClientStatusPending, - Job: testJobSingle, - NodeID: "lost", - }, - // Non-terminal allocs on lost nodes are lost - "lost2": { - ID: "lost2", - ClientStatus: structs.AllocClientStatusRunning, - Job: testJobSingle, - NodeID: "lost", - }, - }, - expiring: allocSet{}, - }, - { - name: "disco-client-disconnect-unset-max-disconnect-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: true, - all: allocSet{ - // Non-terminal allocs on disconnected nodes w/o max-disconnect are lost - "disconnecting-running": { - ID: "disconnecting-running", - Name: "disconnecting-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{"disconnecting-running": { - ID: "disconnecting-running", - Name: "disconnecting-running", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - }}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "disco-client-untainted-reconnect-failed-and-replaced-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "failed-original", - }, - // Failed and replaced allocs on reconnected nodes - // that are still desired-running are reconnected so - // we can stop them - "failed-original": { - ID: "failed-original", - Name: "web", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "failed-original", - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "failed-original": { - ID: "failed-original", - Name: "web", - ClientStatus: structs.AllocClientStatusFailed, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: 
"disco-client-reconnect-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - // Expired allocs on reconnected clients are lost - "expired-reconnect": { - ID: "expired-reconnect", - Name: "expired-reconnect", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - }, - untainted: allocSet{}, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{ - "expired-reconnect": { - ID: "expired-reconnect", - Name: "expired-reconnect", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: expiredAllocState, - }, - }, + }, + untainted: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "failed-original", }, - { - name: "disco-client-running-reconnecting-and-replacement-untainted-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "running-original", - }, - // Running and replaced allocs on reconnected nodes are reconnecting - "running-original": { - ID: "running-original", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - "running-replacement": { - ID: "running-replacement", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - PreviousAllocation: "running-original", - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{ - "running-original": { - ID: "running-original", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - // After an alloc is reconnected, it should be considered - // "untainted" instead of "reconnecting" to allow changes such as - // job updates to be applied properly. 
- name: "disco-client-reconnected-alloc-untainted", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: false, - all: allocSet{ - "running-reconnected": { - ID: "running-reconnected", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: reconnectedAllocState, - }, - }, - untainted: allocSet{ - "running-reconnected": { - ID: "running-reconnected", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobSingle, - NodeID: "normal", - TaskGroup: "web", - AllocStates: reconnectedAllocState, - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{}, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{}, - expiring: allocSet{}, - }, - { - name: "disco-client-reconnected-alloc-untainted-single-instance-on", - supportsDisconnectedClients: true, - now: time.Now(), - taintedNodes: nodes, - skipNilNodeTest: true, - all: allocSet{ - "untainted-unknown": { - ID: "untainted-unknown", - Name: "web", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - "disconnecting-running": { - ID: "disconnecting-running", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - }, - "lost-running": { - ID: "lost-running", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnect, - NodeID: "disconnected", - TaskGroup: "web", - }, - "untainted-unknown-on-down-node": { - ID: "untainted-unknown-on-down-node", - Name: "web", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "down", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - untainted: allocSet{ - "untainted-unknown": { - ID: "untainted-unknown", - Name: "web", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - "untainted-unknown-on-down-node": { - ID: "untainted-unknown-on-down-node", - Name: "web", - ClientStatus: structs.AllocClientStatusUnknown, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "down", - TaskGroup: "web", - AllocStates: unknownAllocState, - }, - }, - migrate: allocSet{}, - disconnecting: allocSet{ - "disconnecting-running": { - ID: "disconnecting-running", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnectSingle, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - reconnecting: allocSet{}, - ignore: allocSet{}, - lost: allocSet{ - "lost-running": { - ID: "lost-running", - Name: "web", - ClientStatus: structs.AllocClientStatusRunning, - DesiredStatus: structs.AllocDesiredStatusRun, - Job: testJobNoMaxDisconnect, - NodeID: "disconnected", - TaskGroup: "web", - }, - }, - expiring: allocSet{}, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + 
"failed-original": { + ID: "failed-original", + Name: "web", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // With tainted nodes - untainted, migrate, lost, disconnecting, reconnecting, ignore, expired := tc.all.filterByTainted(tc.taintedNodes, tc.supportsDisconnectedClients, tc.now) - must.Eq(t, tc.untainted, untainted, must.Sprintf("with-nodes: untainted")) - must.Eq(t, tc.migrate, migrate, must.Sprintf("with-nodes: migrate")) - must.Eq(t, tc.lost, lost, must.Sprintf("with-nodes: lost")) - must.Eq(t, tc.disconnecting, disconnecting, must.Sprintf("with-nodes: disconnecting")) - must.Eq(t, tc.reconnecting, reconnecting, must.Sprintf("with-nodes: reconnecting")) - must.Eq(t, tc.ignore, ignore, must.Sprintf("with-nodes: ignore")) - must.Eq(t, tc.expiring, expired, must.Sprintf("with-nodes: expiring")) + }, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-reconnecting-running-no-replacement", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + // Running allocs on reconnected nodes with no replacement are reconnecting. + // Node.UpdateStatus has already handled syncing client state so this + // should be a noop. + "reconnecting-running-no-replacement": { + ID: "reconnecting-running-no-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + "reconnecting-running-no-replacement": { + ID: "reconnecting-running-no-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-terminal", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + // Allocs on reconnected nodes that are complete are ignored + "ignored-reconnect-complete": { + ID: "ignored-reconnect-complete", + Name: "ignored-reconnect-complete", + ClientStatus: structs.AllocClientStatusComplete, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + // Failed allocs on reconnected nodes are in reconnecting so that + // they be marked with desired status stop at the server. 
+ "reconnecting-failed": { + ID: "reconnecting-failed", + Name: "reconnecting-failed", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + // Lost allocs on reconnected nodes don't get restarted + "ignored-reconnect-lost": { + ID: "ignored-reconnect-lost", + Name: "ignored-reconnect-lost", + ClientStatus: structs.AllocClientStatusLost, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + // Replacement allocs that are complete are ignored + "ignored-reconnect-complete-replacement": { + ID: "ignored-reconnect-complete-replacement", + Name: "ignored-reconnect-complete", + ClientStatus: structs.AllocClientStatusComplete, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + PreviousAllocation: "untainted-reconnect-complete", + }, + // Replacement allocs on reconnected nodes that are failed are ignored + "ignored-reconnect-failed-replacement": { + ID: "ignored-reconnect-failed-replacement", + Name: "ignored-reconnect-failed", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "reconnecting-failed", + }, + // Lost replacement allocs on reconnected nodes don't get restarted + "ignored-reconnect-lost-replacement": { + ID: "ignored-reconnect-lost-replacement", + Name: "ignored-reconnect-lost", + ClientStatus: structs.AllocClientStatusLost, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + PreviousAllocation: "untainted-reconnect-lost", + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + "reconnecting-failed": { + ID: "reconnecting-failed", + Name: "reconnecting-failed", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + ignore: allocSet{ + "ignored-reconnect-complete": { + ID: "ignored-reconnect-complete", + Name: "ignored-reconnect-complete", + ClientStatus: structs.AllocClientStatusComplete, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + "ignored-reconnect-lost": { + ID: "ignored-reconnect-lost", + Name: "ignored-reconnect-lost", + ClientStatus: structs.AllocClientStatusLost, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + "ignored-reconnect-complete-replacement": { + ID: "ignored-reconnect-complete-replacement", + Name: "ignored-reconnect-complete", + ClientStatus: structs.AllocClientStatusComplete, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + PreviousAllocation: "untainted-reconnect-complete", + }, + "ignored-reconnect-failed-replacement": { + ID: "ignored-reconnect-failed-replacement", + Name: "ignored-reconnect-failed", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + 
PreviousAllocation: "reconnecting-failed", + }, + "ignored-reconnect-lost-replacement": { + ID: "ignored-reconnect-lost-replacement", + Name: "ignored-reconnect-lost", + ClientStatus: structs.AllocClientStatusLost, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + PreviousAllocation: "untainted-reconnect-lost", + }, + }, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-disconnect", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: true, + all: allocSet{ + // Non-terminal allocs on disconnected nodes are disconnecting + "disconnect-running": { + ID: "disconnect-running", + Name: "disconnect-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + }, + // Unknown allocs on disconnected nodes are acknowledged, so they won't be rescheduled again + "untainted-unknown": { + ID: "untainted-unknown", + Name: "untainted-unknown", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + // Unknown allocs on disconnected nodes are lost when expired + "expiring-unknown": { + ID: "expiring-unknown", + Name: "expiring-unknown", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + // Pending allocs on disconnected nodes are lost + "lost-pending": { + ID: "lost-pending", + Name: "lost-pending", + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + }, + // Expired allocs on reconnected clients are lost + "expiring-expired": { + ID: "expiring-expired", + Name: "expiring-expired", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + // Failed and stopped allocs on disconnected nodes are ignored + "ignore-reconnected-failed-stopped": { + ID: "ignore-reconnected-failed-stopped", + Name: "ignore-reconnected-failed-stopped", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{ + // Unknown allocs on disconnected nodes are acknowledged, so they won't be rescheduled again + "untainted-unknown": { + ID: "untainted-unknown", + Name: "untainted-unknown", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{ + "disconnect-running": { + ID: "disconnect-running", + Name: "disconnect-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + reconnecting: allocSet{}, + ignore: allocSet{ + "ignore-reconnected-failed-stopped": { + ID: "ignore-reconnected-failed-stopped", + Name: "ignore-reconnected-failed-stopped", + ClientStatus: 
structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusStop, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + lost: allocSet{ + "lost-pending": { + ID: "lost-pending", + Name: "lost-pending", + ClientStatus: structs.AllocClientStatusPending, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + expiring: allocSet{ + "expiring-unknown": { + ID: "expiring-unknown", + Name: "expiring-unknown", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + "expiring-expired": { + ID: "expiring-expired", + Name: "expiring-expired", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + }, + }, + { + name: "disco-client-reconnect", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + // Expired allocs on reconnected clients are lost + "expired-reconnect": { + ID: "expired-reconnect", + Name: "expired-reconnect", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{ + "expired-reconnect": { + ID: "expired-reconnect", + Name: "expired-reconnect", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + }, + }, + { + name: "disco-client-running-reconnecting-and-replacement-untainted", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "running-original", + }, + // Running and replaced allocs on reconnected nodes are reconnecting + "running-original": { + ID: "running-original", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "running-original", + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + "running-original": { + ID: "running-original", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + // After an alloc is reconnected, it should be considered + // 
"untainted" instead of "reconnecting" to allow changes such as + // job updates to be applied properly. + name: "disco-client-reconnected-alloc-untainted", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-reconnected": { + ID: "running-reconnected", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: reconnectedAllocState, + }, + }, + untainted: allocSet{ + "running-reconnected": { + ID: "running-reconnected", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJob, + NodeID: "normal", + TaskGroup: "web", + AllocStates: reconnectedAllocState, + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + // Everything below this line tests the single instance on lost mode. + { + name: "lost-client-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "untainted1": { + ID: "untainted1", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJobSingle, + NodeID: "normal", + }, + // Terminal allocs are always untainted + "untainted2": { + ID: "untainted2", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "normal", + }, + // Terminal allocs are always untainted, even on draining nodes + "untainted3": { + ID: "untainted3", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "draining", + }, + // Terminal allocs are always untainted, even on lost nodes + "untainted4": { + ID: "untainted4", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "lost", + }, + // Non-terminal alloc with migrate=true should migrate on a draining node + "migrating1": { + ID: "migrating1", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJobSingle, + NodeID: "draining", + }, + // Non-terminal alloc with migrate=true should migrate on an unknown node + "migrating2": { + ID: "migrating2", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJobSingle, + NodeID: "nil", + }, + }, + untainted: allocSet{ + "untainted1": { + ID: "untainted1", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJobSingle, + NodeID: "normal", + }, + // Terminal allocs are always untainted + "untainted2": { + ID: "untainted2", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "normal", + }, + // Terminal allocs are always untainted, even on draining nodes + "untainted3": { + ID: "untainted3", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "draining", + }, + // Terminal allocs are always untainted, even on lost nodes + "untainted4": { + ID: "untainted4", + ClientStatus: structs.AllocClientStatusComplete, + Job: testJobSingle, + NodeID: "lost", + }, + }, + migrate: allocSet{ + // Non-terminal alloc with migrate=true should migrate on a draining node + "migrating1": { + ID: "migrating1", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJobSingle, + 
NodeID: "draining", + }, + // Non-terminal alloc with migrate=true should migrate on an unknown node + "migrating2": { + ID: "migrating2", + ClientStatus: structs.AllocClientStatusRunning, + DesiredTransition: structs.DesiredTransition{Migrate: pointer.Of(true)}, + Job: testJobSingle, + NodeID: "nil", + }, + }, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "lost-client-only-tainted-nodes-single-instance-on", + supportsDisconnectedClients: false, + now: time.Now(), + taintedNodes: nodes, + // The logic associated with this test case can only trigger if there + // is a tainted node. Therefore, testing with a nil node set produces + // false failures, so don't perform that test if in this case. + skipNilNodeTest: true, + all: allocSet{ + // Non-terminal allocs on lost nodes are lost + "lost1": { + ID: "lost1", + ClientStatus: structs.AllocClientStatusPending, + Job: testJobSingle, + NodeID: "lost", + }, + // Non-terminal allocs on lost nodes are lost + "lost2": { + ID: "lost2", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJobSingle, + NodeID: "lost", + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{ + // Non-terminal allocs on lost nodes are lost + "lost1": { + ID: "lost1", + ClientStatus: structs.AllocClientStatusPending, + Job: testJobSingle, + NodeID: "lost", + }, + // Non-terminal allocs on lost nodes are lost + "lost2": { + ID: "lost2", + ClientStatus: structs.AllocClientStatusRunning, + Job: testJobSingle, + NodeID: "lost", + }, + }, + expiring: allocSet{}, + }, + { + name: "disco-client-disconnect-unset-max-disconnect-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: true, + all: allocSet{ + // Non-terminal allocs on disconnected nodes w/o max-disconnect are lost + "disconnecting-running": { + ID: "disconnecting-running", + Name: "disconnecting-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{"disconnecting-running": { + ID: "disconnecting-running", + Name: "disconnecting-running", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + }}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-untainted-reconnect-failed-and-replaced-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "failed-original", + }, + // Failed and replaced allocs on reconnected nodes + // that are still desired-running are reconnected so + // we can stop them + "failed-original": { + ID: "failed-original", + Name: "web", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + 
AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "failed-original", + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + "failed-original": { + ID: "failed-original", + Name: "web", + ClientStatus: structs.AllocClientStatusFailed, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-reconnect-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + // Expired allocs on reconnected clients are lost + "expired-reconnect": { + ID: "expired-reconnect", + Name: "expired-reconnect", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + }, + untainted: allocSet{}, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{ + "expired-reconnect": { + ID: "expired-reconnect", + Name: "expired-reconnect", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: expiredAllocState, + }, + }, + }, + { + name: "disco-client-running-reconnecting-and-replacement-untainted-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "running-original", + }, + // Running and replaced allocs on reconnected nodes are reconnecting + "running-original": { + ID: "running-original", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{ + "running-replacement": { + ID: "running-replacement", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + PreviousAllocation: "running-original", + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{ + "running-original": { + ID: "running-original", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + // After an alloc is reconnected, it should be considered + // "untainted" instead of "reconnecting" to allow changes such as + // job updates to be applied properly. 
+ name: "disco-client-reconnected-alloc-untainted", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: false, + all: allocSet{ + "running-reconnected": { + ID: "running-reconnected", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: reconnectedAllocState, + }, + }, + untainted: allocSet{ + "running-reconnected": { + ID: "running-reconnected", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobSingle, + NodeID: "normal", + TaskGroup: "web", + AllocStates: reconnectedAllocState, + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{}, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{}, + expiring: allocSet{}, + }, + { + name: "disco-client-reconnected-alloc-untainted-single-instance-on", + supportsDisconnectedClients: true, + now: time.Now(), + taintedNodes: nodes, + skipNilNodeTest: true, + all: allocSet{ + "untainted-unknown": { + ID: "untainted-unknown", + Name: "web", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + "disconnecting-running": { + ID: "disconnecting-running", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + }, + "lost-running": { + ID: "lost-running", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnect, + NodeID: "disconnected", + TaskGroup: "web", + }, + "untainted-unknown-on-down-node": { + ID: "untainted-unknown-on-down-node", + Name: "web", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "down", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + untainted: allocSet{ + "untainted-unknown": { + ID: "untainted-unknown", + Name: "web", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + "untainted-unknown-on-down-node": { + ID: "untainted-unknown-on-down-node", + Name: "web", + ClientStatus: structs.AllocClientStatusUnknown, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "down", + TaskGroup: "web", + AllocStates: unknownAllocState, + }, + }, + migrate: allocSet{}, + disconnecting: allocSet{ + "disconnecting-running": { + ID: "disconnecting-running", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnectSingle, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + reconnecting: allocSet{}, + ignore: allocSet{}, + lost: allocSet{ + "lost-running": { + ID: "lost-running", + Name: "web", + ClientStatus: structs.AllocClientStatusRunning, + DesiredStatus: structs.AllocDesiredStatusRun, + Job: testJobNoMaxDisconnect, + NodeID: "disconnected", + TaskGroup: "web", + }, + }, + expiring: allocSet{}, + }, + } - if tc.skipNilNodeTest { - return - } + for _, tc := range testCases { + 
t.Run(tc.name, func(t *testing.T) { + // With tainted nodes + untainted, migrate, lost, disconnecting, reconnecting, ignore, expired := tc.all.filterByTainted(tc.taintedNodes, tc.supportsDisconnectedClients, tc.now) + must.Eq(t, tc.untainted, untainted, must.Sprintf("with-nodes: untainted")) + must.Eq(t, tc.migrate, migrate, must.Sprintf("with-nodes: migrate")) + must.Eq(t, tc.lost, lost, must.Sprintf("with-nodes: lost")) + must.Eq(t, tc.disconnecting, disconnecting, must.Sprintf("with-nodes: disconnecting")) + must.Eq(t, tc.reconnecting, reconnecting, must.Sprintf("with-nodes: reconnecting")) + must.Eq(t, tc.ignore, ignore, must.Sprintf("with-nodes: ignore")) + must.Eq(t, tc.expiring, expired, must.Sprintf("with-nodes: expiring")) - // Now again with nodes nil - untainted, migrate, lost, disconnecting, reconnecting, ignore, expired = tc.all.filterByTainted(nil, tc.supportsDisconnectedClients, tc.now) - must.Eq(t, tc.untainted, untainted, must.Sprintf("with-nodes: untainted")) - must.Eq(t, tc.migrate, migrate, must.Sprintf("with-nodes: migrate")) - must.Eq(t, tc.lost, lost, must.Sprintf("with-nodes: lost")) - must.Eq(t, tc.disconnecting, disconnecting, must.Sprintf("with-nodes: disconnecting")) - must.Eq(t, tc.reconnecting, reconnecting, must.Sprintf("with-nodes: reconnecting")) - must.Eq(t, tc.ignore, ignore, must.Sprintf("with-nodes: ignore")) - must.Eq(t, tc.ignore, ignore, must.Sprintf("with-nodes: expiring")) - must.Eq(t, tc.expiring, expired, must.Sprintf("with-nodes: expiring")) - }) + if tc.skipNilNodeTest { + return } + + // Now again with nodes nil + untainted, migrate, lost, disconnecting, reconnecting, ignore, expired = tc.all.filterByTainted(nil, tc.supportsDisconnectedClients, tc.now) + must.Eq(t, tc.untainted, untainted, must.Sprintf("nodes-nil: untainted")) + must.Eq(t, tc.migrate, migrate, must.Sprintf("nodes-nil: migrate")) + must.Eq(t, tc.lost, lost, must.Sprintf("nodes-nil: lost")) + must.Eq(t, tc.disconnecting, disconnecting, must.Sprintf("nodes-nil: disconnecting")) + must.Eq(t, tc.reconnecting, reconnecting, must.Sprintf("nodes-nil: reconnecting")) + must.Eq(t, tc.ignore, ignore, must.Sprintf("nodes-nil: ignore")) + must.Eq(t, tc.expiring, expired, must.Sprintf("nodes-nil: expiring")) }) } } diff --git a/scheduler/reconnecting_picker/reconnecting_picker.go b/scheduler/reconnecting_picker/reconnecting_picker.go deleted file mode 100644 index 8a6c29ad93a4..000000000000 --- a/scheduler/reconnecting_picker/reconnecting_picker.go +++ /dev/null @@ -1,147 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package reconnectingpicker - -import ( - "time" - - log "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/nomad/structs" -) - -type ReconnectingPicker struct { - logger log.Logger -} - -func New(logger log.Logger) *ReconnectingPicker { - rp := ReconnectingPicker{ - logger: logger.Named("reconnecting-picker"), - } - - return &rp -} - -func (rp *ReconnectingPicker) PickReconnectingAlloc(ds *structs.DisconnectStrategy, original *structs.Allocation, replacement *structs.Allocation) *structs.Allocation { - // Check if the replacement is a newer job version. - // Always prefer the replacement if true.
- replacementIsNewer := replacement.Job.Version > original.Job.Version || - replacement.Job.CreateIndex > original.Job.CreateIndex - if replacementIsNewer { - rp.logger.Debug("replacement has a newer job version, keeping replacement") - return replacement - } - - var picker func(*structs.Allocation, *structs.Allocation) *structs.Allocation - - rs := ds.ReconcileStrategy() - rp.logger.Debug("picking according to strategy", "strategy", rs) - - switch rs { - case structs.ReconcileOptionBestScore: - picker = rp.pickBestScore - - case structs.ReconcileOptionKeepOriginal: - picker = rp.pickOriginal - - case structs.ReconcileOptionKeepReplacement: - picker = rp.pickReplacement - - case structs.ReconcileOptionLongestRunning: - picker = rp.pickLongestRunning - } - - return picker(original, replacement) -} - -// pickReconnectingAlloc returns the allocation to keep between the original -// one that is reconnecting and one of its replacements. -// -// This function is not commutative, meaning that pickReconnectingAlloc(A, B) -// is not the same as pickReconnectingAlloc(B, A). Preference is given to keep -// the original allocation when possible. -func (rp *ReconnectingPicker) pickBestScore(original *structs.Allocation, replacement *structs.Allocation) *structs.Allocation { - - // Check if the replacement has better placement score. - // If any of the scores is not available, only pick the replacement if - // itself does have scores. - originalMaxScoreMeta := original.Metrics.MaxNormScore() - replacementMaxScoreMeta := replacement.Metrics.MaxNormScore() - - replacementHasBetterScore := originalMaxScoreMeta == nil && replacementMaxScoreMeta != nil || - (originalMaxScoreMeta != nil && replacementMaxScoreMeta != nil && - replacementMaxScoreMeta.NormScore > originalMaxScoreMeta.NormScore) - - // Check if the replacement has better client status. - // Even with a better placement score make sure we don't replace a running - // allocation with one that is not. - replacementIsRunning := replacement.ClientStatus == structs.AllocClientStatusRunning - originalNotRunning := original.ClientStatus != structs.AllocClientStatusRunning - - if replacementHasBetterScore && (replacementIsRunning || originalNotRunning) { - return replacement - } - - return original -} - -func (rp *ReconnectingPicker) pickOriginal(original, _ *structs.Allocation) *structs.Allocation { - return original -} - -func (rp *ReconnectingPicker) pickReplacement(_, replacement *structs.Allocation) *structs.Allocation { - return replacement -} - -func (rp *ReconnectingPicker) pickLongestRunning(original, replacement *structs.Allocation) *structs.Allocation { - tg := original.Job.LookupTaskGroup(original.TaskGroup) - - orgStartTime := startOfLeaderOrOldestTaskInMain(original, tg) - repStartTime := startOfLeaderOrOldestTaskInMain(replacement, tg) - - if orgStartTime.IsZero() && !repStartTime.IsZero() { - return replacement - } - - if !orgStartTime.IsZero() && repStartTime.IsZero() { - return original - } - - // If neither one of them is running yet, default to best score. - if repStartTime.IsZero() && orgStartTime.IsZero() { - return rp.pickBestScore(original, replacement) - } - - // If the replacement has a later start time, keep the original. 
- if orgStartTime.Before(repStartTime) { - return original - } - - return replacement -} - -func startOfLeaderOrOldestTaskInMain(alloc *structs.Allocation, tg *structs.TaskGroup) time.Time { - if tg == nil || len(tg.Tasks) == 0 { - return time.Time{} - } - - now := time.Now().UTC() - oldestStart := now - - for _, task := range tg.Tasks { - ls := alloc.LastStartOfTask(task.Name) - if task.Leader { - return ls - } - - if !ls.IsZero() && ls.Before(oldestStart) { - oldestStart = ls - } - } - - if oldestStart == now { - return time.Time{} - } - - return oldestStart -} diff --git a/scheduler/reconnecting_picker/reconnecting_picker_test.go b/scheduler/reconnecting_picker/reconnecting_picker_test.go deleted file mode 100644 index 01bbf213c76a..000000000000 --- a/scheduler/reconnecting_picker/reconnecting_picker_test.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright (c) HashiCorp, Inc. -// SPDX-License-Identifier: BUSL-1.1 - -package reconnectingpicker - -import ( - "fmt" - "testing" - "time" - - "github.com/hashicorp/go-hclog" - "github.com/hashicorp/nomad/nomad/structs" - "github.com/shoenig/test/must" -) - -func TestPickReconnectingAlloc_NewerVersion(t *testing.T) { - rp := New(hclog.NewNullLogger()) - ds := &structs.DisconnectStrategy{ - Reconcile: "best-score", - } - - replacement := &structs.Allocation{ - Job: &structs.Job{ - Version: 2, - CreateIndex: 2, - }, - } - - testCases := []struct { - name string - version uint64 - createIndex uint64 - expected *structs.Allocation - }{ - { - name: "original_is_older", - version: 1, - createIndex: 1, - expected: replacement, - }, - { - name: "original_has_older_version", - version: 1, - createIndex: 2, - expected: replacement, - }, - { - name: "original_has_older_create_index", - version: 2, - createIndex: 1, - expected: replacement, - }, - } - - for _, tc := range testCases { - original := &structs.Allocation{ - Job: &structs.Job{ - Version: tc.version, - CreateIndex: tc.createIndex, - }, - } - - result := rp.PickReconnectingAlloc(ds, original, replacement) - - must.Eq(t, tc.expected, result) - } -} - -func TestPickReconnectingAlloc_DifferentStrategies(t *testing.T) { - rp := New(hclog.NewNullLogger()) - now := time.Now() - - original := &structs.Allocation{ - TaskGroup: "taskgroup1", - Job: &structs.Job{ - Version: 1, - CreateIndex: 1, - TaskGroups: []*structs.TaskGroup{ - { - Name: "taskgroup1", - Tasks: []*structs.Task{ - { - Name: "task1", - }, - }, - }, - }, - }, - TaskStates: map[string]*structs.TaskState{ - "task1": { - Restarts: 0, - StartedAt: now.Add(-time.Hour), - }, - }, - Metrics: &structs.AllocMetric{ - ScoreMetaData: []*structs.NodeScoreMeta{ - { - NormScore: 10, - }, - }, - }, - } - - replacement := &structs.Allocation{ - Job: &structs.Job{ - Version: 1, - CreateIndex: 1, - TaskGroups: []*structs.TaskGroup{ - { - Name: "taskgroup1", - Tasks: []*structs.Task{ - { - Name: "task1", - }, - }, - }, - }, - }, - TaskStates: map[string]*structs.TaskState{ - "task1": { - Restarts: 0, - StartedAt: now.Add(-30 * time.Minute), - }, - }, - Metrics: &structs.AllocMetric{ - ScoreMetaData: []*structs.NodeScoreMeta{ - { - NormScore: 20, - }, - }, - }, - } - - testsCases := []struct { - name string - strategy string - - expected *structs.Allocation - }{ - { - name: "keep_the_allocation_with_the_best_score", - strategy: structs.ReconcileOptionBestScore, - expected: replacement, - }, - { - name: "keep_the_original_allocation", - strategy: structs.ReconcileOptionKeepOriginal, - expected: original, - }, - { - name: "keep_the_replacement_allocation", - 
strategy: structs.ReconcileOptionKeepReplacement, - expected: replacement, - }, - { - name: "keep_the_longest_running_allocation", - strategy: structs.ReconcileOptionLongestRunning, - expected: original, - }, - } - - for _, tc := range testsCases { - t.Run(tc.name, func(t *testing.T) { - ds := &structs.DisconnectStrategy{ - Reconcile: tc.strategy, - } - - result := rp.PickReconnectingAlloc(ds, original, replacement) - must.Eq(t, tc.expected, result) - - }) - } -} - -func TestPickReconnectingAlloc_BestScore(t *testing.T) { - rp := New(hclog.NewNullLogger()) - - original := &structs.Allocation{ - Job: &structs.Job{ - Version: 1, - CreateIndex: 1, - }, - TaskGroup: "taskgroup1", - Metrics: &structs.AllocMetric{ - ScoreMetaData: []*structs.NodeScoreMeta{ - { - NormScore: 10, - }, - }, - }, - } - - replacement := original.Copy() - - testsCases := []struct { - name string - originalClientStatus string - replacementClientStatus string - replacementScore float64 - expected *structs.Allocation - }{ - { - name: "replacement_has_better_score_and_running", - replacementScore: 20, - originalClientStatus: structs.AllocClientStatusRunning, - replacementClientStatus: structs.AllocClientStatusRunning, - expected: replacement, - }, - { - name: "original_has_better_score_and_running", - originalClientStatus: structs.AllocClientStatusRunning, - replacementClientStatus: structs.AllocClientStatusRunning, - replacementScore: 5, - expected: original, - }, - { - name: "replacement_has_better_score_but_replacement_not_running", - originalClientStatus: structs.AllocClientStatusRunning, - replacementClientStatus: structs.AllocClientStatusPending, - replacementScore: 20, - expected: original, - }, - { - name: "replacement_has_better_score_and_original_not_running", - originalClientStatus: structs.AllocClientStatusPending, - replacementClientStatus: structs.AllocClientStatusRunning, - replacementScore: 20, - expected: replacement, - }, - { - name: "original_has_better_score_but_not_running", - originalClientStatus: structs.AllocClientStatusPending, - replacementClientStatus: structs.AllocClientStatusRunning, - replacementScore: 5, - expected: original, - }, - { - name: "original_has_better_score_and_replacement_not_running", - originalClientStatus: structs.AllocClientStatusRunning, - replacementClientStatus: structs.AllocClientStatusPending, - replacementScore: 5, - expected: original, - }, - } - - for _, tc := range testsCases { - t.Run(tc.name, func(t *testing.T) { - original.ClientStatus = tc.originalClientStatus - - replacement.ClientStatus = tc.replacementClientStatus - replacement.Metrics.ScoreMetaData[0].NormScore = tc.replacementScore - - result := rp.PickReconnectingAlloc(&structs.DisconnectStrategy{ - Reconcile: structs.ReconcileOptionBestScore, - }, original, replacement) - - must.Eq(t, tc.expected, result) - }) - } -} - -func TestPickReconnectingAlloc_LongestRunning(t *testing.T) { - rp := New(hclog.NewNullLogger()) - now := time.Now() - fmt.Println(now) - taskGroupNoLeader := &structs.TaskGroup{ - Name: "taskGroupNoLeader", - Tasks: []*structs.Task{ - { - Name: "task1", - }, - { - Name: "task2", - }, - { - Name: "task3", - }, - }, - } - - taskGroupWithLeader := &structs.TaskGroup{ - Name: "taskGroupWithLeader", - Tasks: []*structs.Task{ - { - Name: "task1", - }, - { - Name: "task2", - Leader: true, - }, - { - Name: "task3", - }, - }, - } - - emptyTaskGroup := &structs.TaskGroup{ - Name: "emptyTaskGroup", - } - - original := &structs.Allocation{ - Job: &structs.Job{ - Version: 1, - CreateIndex: 1, - 
TaskGroups: []*structs.TaskGroup{ - taskGroupNoLeader, - taskGroupWithLeader, - emptyTaskGroup, - }, - }, - TaskStates: map[string]*structs.TaskState{ - "task2": {}, - }, - Metrics: &structs.AllocMetric{ - ScoreMetaData: []*structs.NodeScoreMeta{ - { - NormScore: 10, - }, - }, - }, - } - - replacement := original.Copy() - replacement.Metrics.ScoreMetaData[0].NormScore = 20 - - testsCases := []struct { - name string - taskGroupName string - originalState structs.TaskState - replacementState structs.TaskState - expected *structs.Allocation - }{ - { - name: "original_with_no_restart", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 0, - }, - expected: original, - }, - { - name: "original_with_no_restart_on_leader", - taskGroupName: "taskGroupWithLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 0, - }, - expected: original, - }, - { - name: "empty_task_group", - taskGroupName: "emptyTaskGroup", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 0, - }, - expected: replacement, - }, - { - name: "original_with_no_restart_on_leader", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 0, - }, - expected: original, - }, - { - name: "original_with_older_restarts", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 4, - LastRestart: now.Add(-50 * time.Minute), - }, - expected: original, - }, - { - name: "original_with_newer_restarts", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-time.Hour), - Restarts: 4, - LastRestart: now.Add(-5 * time.Minute), - }, - expected: replacement, - }, - { - name: "original_with_zero_start_time", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - originalState: structs.TaskState{ - StartedAt: time.Time{}, - Restarts: 0, - }, - expected: replacement, - }, - { - name: "replacement_with_zero_start_time", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: time.Time{}, - Restarts: 0, - }, - originalState: structs.TaskState{ - StartedAt: now.Add(-30 * time.Minute), - Restarts: 2, - LastRestart: now.Add(-10 * time.Minute), - }, - expected: original, - }, - { - name: "both_with_zero_start_time_pick_best_score", - taskGroupName: "taskGroupNoLeader", - replacementState: structs.TaskState{ - StartedAt: time.Time{}, - Restarts: 0, - }, - 
originalState: structs.TaskState{
-				StartedAt: time.Time{},
-				Restarts:  0,
-			},
-			expected: replacement,
-		},
-	}
-
-	for _, tc := range testsCases {
-		t.Run(tc.name, func(t *testing.T) {
-			original.TaskGroup = tc.taskGroupName
-			replacement.TaskGroup = tc.taskGroupName
-
-			original.TaskStates["task2"] = &tc.originalState
-			replacement.TaskStates["task2"] = &tc.replacementState
-
-			result := rp.PickReconnectingAlloc(&structs.DisconnectStrategy{
-				Reconcile: structs.ReconcileOptionLongestRunning,
-			}, original, replacement)
-
-			must.Eq(t, tc.expected, result)
-		})
-	}
-}
diff --git a/scheduler/stack_test.go b/scheduler/stack_test.go
index 0197bb245424..fe4cc33e887b 100644
--- a/scheduler/stack_test.go
+++ b/scheduler/stack_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/hashicorp/nomad/nomad/mock"
 	"github.com/hashicorp/nomad/nomad/structs"
 	"github.com/shoenig/test/must"
+	"github.com/stretchr/testify/require"
 )
 
 func BenchmarkServiceStack_With_ComputedClass(b *testing.B) {
@@ -149,11 +150,15 @@ func TestServiceStack_Select_PreferringNodes(t *testing.T) {
 	prefNodes := []*structs.Node{preferredNode}
 	selectOptions := &SelectOptions{PreferredNodes: prefNodes}
 	option := stack.Select(job.TaskGroups[0], selectOptions)
-	must.NotNil(t, option, must.Sprintf("missing node %#v", ctx.Metrics()))
-	must.Eq(t, option.Node.ID, preferredNode.ID)
+	if option == nil {
+		t.Fatalf("missing node %#v", ctx.Metrics())
+	}
+	if option.Node.ID != preferredNode.ID {
+		t.Fatalf("expected: %v, actual: %v", preferredNode.ID, option.Node.ID)
+	}
 
 	// Make sure select doesn't have a side effect on preferred nodes
-	must.Eq(t, prefNodes, selectOptions.PreferredNodes)
+	require.Equal(t, prefNodes, selectOptions.PreferredNodes)
 
 	// Change the preferred node's kernel to windows and ensure the allocations
 	// are placed elsewhere
@@ -163,9 +168,14 @@
 	prefNodes1 := []*structs.Node{preferredNode1}
 	selectOptions = &SelectOptions{PreferredNodes: prefNodes1}
 	option = stack.Select(job.TaskGroups[0], selectOptions)
-	must.NotNil(t, option, must.Sprintf("missing node %#v", ctx.Metrics()))
-	must.Eq(t, option.Node.ID, nodes[0].ID)
-	must.Eq(t, prefNodes1, selectOptions.PreferredNodes)
+	if option == nil {
+		t.Fatalf("missing node %#v", ctx.Metrics())
+	}
+
+	if option.Node.ID != nodes[0].ID {
+		t.Fatalf("expected: %#v, actual: %#v", nodes[0], option.Node)
+	}
+	require.Equal(t, prefNodes1, selectOptions.PreferredNodes)
 }
 
 func TestServiceStack_Select_MetricsReset(t *testing.T) {
@@ -186,16 +196,24 @@
 	selectOptions := &SelectOptions{}
 	n1 := stack.Select(job.TaskGroups[0], selectOptions)
 	m1 := ctx.Metrics()
-	must.NotNil(t, n1, must.Sprintf("missing node %#v", m1))
+	if n1 == nil {
+		t.Fatalf("missing node %#v", m1)
+	}
 
-	must.Eq(t, 2, m1.NodesEvaluated)
+	if m1.NodesEvaluated != 2 {
+		t.Fatalf("should only be 2")
+	}
 
 	n2 := stack.Select(job.TaskGroups[0], selectOptions)
 	m2 := ctx.Metrics()
-	must.NotNil(t, n2, must.Sprintf("missing node %#v", m2))
+	if n2 == nil {
+		t.Fatalf("missing node %#v", m2)
+	}
 
 	// If we don't reset, this would be 4
-	must.Eq(t, 2, m2.NodesEvaluated)
+	if m2.NodesEvaluated != 2 {
+		t.Fatalf("should only be 2")
+	}
 }
 
 func TestServiceStack_Select_DriverFilter(t *testing.T) {
@@ -208,7 +226,9 @@
 	}
 	zero := nodes[0]
 	zero.Attributes["driver.foo"] = "1"
-	must.NoError(t, zero.ComputeClass())
+	if err := zero.ComputeClass(); err != nil {
t.Fatalf("ComputedClass() failed: %v", err) + } stack := NewGenericStack(false, ctx) stack.SetNodes(nodes) @@ -219,9 +239,13 @@ func TestServiceStack_Select_DriverFilter(t *testing.T) { selectOptions := &SelectOptions{} node := stack.Select(job.TaskGroups[0], selectOptions) - must.NotNil(t, node, must.Sprintf("missing node %#v", ctx.Metrics())) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } - must.Eq(t, zero, node.Node) + if node.Node != zero { + t.Fatalf("bad") + } } func TestServiceStack_Select_HostVolume(t *testing.T) { @@ -335,7 +359,7 @@ func TestServiceStack_Select_CSI(t *testing.T) { v.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem v.PluginID = "bar" err := state.UpsertCSIVolume(999, []*structs.CSIVolume{v}) - must.NoError(t, err) + require.NoError(t, err) // Create a node with healthy fingerprints for both controller and node plugins zero := nodes[0] @@ -362,10 +386,12 @@ func TestServiceStack_Select_CSI(t *testing.T) { // Add the node to the state store to index the healthy plugins and mark the volume "foo" healthy err = state.UpsertNode(structs.MsgTypeTestSetup, 1000, zero) - must.NoError(t, err) + require.NoError(t, err) // Use the node to build the stack and test - must.NoError(t, zero.ComputeClass()) + if err := zero.ComputeClass(); err != nil { + t.Fatalf("ComputedClass() failed: %v", err) + } stack := NewGenericStack(false, ctx) stack.SetNodes(nodes) @@ -385,9 +411,13 @@ func TestServiceStack_Select_CSI(t *testing.T) { selectOptions := &SelectOptions{ AllocName: structs.AllocName(job.Name, job.TaskGroups[0].Name, 0)} node := stack.Select(job.TaskGroups[0], selectOptions) - must.NotNil(t, node, must.Sprintf("missing node %#v", ctx.Metrics())) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } - must.Eq(t, zero, node.Node) + if node.Node != zero { + t.Fatalf("bad") + } } func TestServiceStack_Select_ConstraintFilter(t *testing.T) { @@ -412,14 +442,24 @@ func TestServiceStack_Select_ConstraintFilter(t *testing.T) { stack.SetJob(job) selectOptions := &SelectOptions{} node := stack.Select(job.TaskGroups[0], selectOptions) - must.NotNil(t, node, must.Sprintf("missing node %#v", ctx.Metrics())) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } - must.Eq(t, zero, node.Node) + if node.Node != zero { + t.Fatalf("bad") + } met := ctx.Metrics() - must.One(t, met.NodesFiltered) - must.One(t, met.ClassFiltered["linux-medium-pci"]) - must.One(t, met.ConstraintFiltered["${attr.kernel.name} = freebsd"]) + if met.NodesFiltered != 1 { + t.Fatalf("bad: %#v", met) + } + if met.ClassFiltered["linux-medium-pci"] != 1 { + t.Fatalf("bad: %#v", met) + } + if met.ConstraintFiltered["${attr.kernel.name} = freebsd"] != 1 { + t.Fatalf("bad: %#v", met) + } } func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { @@ -446,14 +486,25 @@ func TestServiceStack_Select_BinPack_Overflow(t *testing.T) { selectOptions := &SelectOptions{} node := stack.Select(job.TaskGroups[0], selectOptions) ctx.Metrics().PopulateScoreMetaData() - must.NotNil(t, node) - must.Eq(t, zero, node.Node) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } + + if node.Node != zero { + t.Fatalf("bad") + } met := ctx.Metrics() - must.One(t, met.NodesExhausted) - must.One(t, met.ClassExhausted["linux-medium-pci"]) + if met.NodesExhausted != 1 { + t.Fatalf("bad: %#v", met) + } + if met.ClassExhausted["linux-medium-pci"] != 1 { + t.Fatalf("bad: %#v", met) + } // Expect score metadata for one node - must.Len(t, 1, met.ScoreMetaData) + if 
+	if len(met.ScoreMetaData) != 1 {
+		t.Fatalf("bad: %#v", met)
+	}
 }
 
 func TestSystemStack_SetNodes(t *testing.T) {
@@ -475,7 +526,9 @@ func TestSystemStack_SetNodes(t *testing.T) {
 	stack.SetNodes(nodes)
 
 	out := collectFeasible(stack.source)
-	must.Eq(t, out, nodes)
+	if !reflect.DeepEqual(out, nodes) {
+		t.Fatalf("bad: %#v", out)
+	}
 }
 
 func TestSystemStack_SetJob(t *testing.T) {
@@ -487,8 +540,12 @@
 	job := mock.Job()
 	stack.SetJob(job)
 
-	must.Eq(t, stack.binPack.priority, job.Priority)
-	must.Eq(t, stack.jobConstraint.constraints, job.Constraints)
+	if stack.binPack.priority != job.Priority {
+		t.Fatalf("bad")
+	}
+	if !reflect.DeepEqual(stack.jobConstraint.constraints, job.Constraints) {
+		t.Fatalf("bad")
+	}
 }
 
 func TestSystemStack_Select_Size(t *testing.T) {
@@ -503,7 +560,9 @@
 	stack.SetJob(job)
 	selectOptions := &SelectOptions{}
 	node := stack.Select(job.TaskGroups[0], selectOptions)
-	must.NotNil(t, node)
+	if node == nil {
+		t.Fatalf("missing node %#v", ctx.Metrics())
+	}
 
 	// Note: On Windows time.Now currently has a best case granularity of 1ms.
 	// We skip the following assertion on Windows because this test usually
@@ -532,15 +591,24 @@ func TestSystemStack_Select_MetricsReset(t *testing.T) {
 	selectOptions := &SelectOptions{}
 	n1 := stack.Select(job.TaskGroups[0], selectOptions)
 	m1 := ctx.Metrics()
-	must.NotNil(t, n1)
-	must.One(t, m1.NodesEvaluated)
+	if n1 == nil {
+		t.Fatalf("missing node %#v", m1)
+	}
+
+	if m1.NodesEvaluated != 1 {
+		t.Fatalf("should only be 1")
+	}
 
 	n2 := stack.Select(job.TaskGroups[0], selectOptions)
 	m2 := ctx.Metrics()
-	must.NotNil(t, n2)
+	if n2 == nil {
+		t.Fatalf("missing node %#v", m2)
+	}
 
 	// If we don't reset, this would be 2
-	must.One(t, m2.NodesEvaluated)
+	if m2.NodesEvaluated != 1 {
+		t.Fatalf("should only be 1")
+	}
 }
 
 func TestSystemStack_Select_DriverFilter(t *testing.T) {
@@ -562,17 +630,26 @@
 	selectOptions := &SelectOptions{}
 	node := stack.Select(job.TaskGroups[0], selectOptions)
-	must.NotNil(t, node)
-	must.Eq(t, zero, node.Node)
+	if node == nil {
+		t.Fatalf("missing node %#v", ctx.Metrics())
+	}
+
+	if node.Node != zero {
+		t.Fatalf("bad")
+	}
 
 	zero.Attributes["driver.foo"] = "0"
-	must.NoError(t, zero.ComputeClass())
+	if err := zero.ComputeClass(); err != nil {
+		t.Fatalf("ComputeClass() failed: %v", err)
+	}
 
 	stack = NewSystemStack(false, ctx)
 	stack.SetNodes(nodes)
 	stack.SetJob(job)
 	node = stack.Select(job.TaskGroups[0], selectOptions)
-	must.Nil(t, node)
+	if node != nil {
+		t.Fatalf("node not filtered %#v", node)
+	}
 }
 
 func TestSystemStack_Select_ConstraintFilter(t *testing.T) {
@@ -598,13 +675,24 @@
 	selectOptions := &SelectOptions{}
 	node := stack.Select(job.TaskGroups[0], selectOptions)
-	must.NotNil(t, node)
-	must.Eq(t, zero, node.Node)
+	if node == nil {
+		t.Fatalf("missing node %#v", ctx.Metrics())
+	}
+
+	if node.Node != zero {
+		t.Fatalf("bad")
+	}
 
 	met := ctx.Metrics()
-	must.One(t, met.NodesFiltered)
-	must.One(t, met.ClassFiltered["linux-medium-pci"])
-	must.One(t, met.ConstraintFiltered["${attr.kernel.name} = freebsd"])
+	if met.NodesFiltered != 1 {
+		t.Fatalf("bad: %#v", met)
+	}
+	if met.ClassFiltered["linux-medium-pci"] != 1 {
+		t.Fatalf("bad: %#v", met)
+	}
+	if met.ConstraintFiltered["${attr.kernel.name} = freebsd"] != 1 {
+		t.Fatalf("bad: %#v", met)
+	}
 }
 
 func TestSystemStack_Select_BinPack_Overflow(t *testing.T) {
@@
-632,12 +720,23 @@ func TestSystemStack_Select_BinPack_Overflow(t *testing.T) { selectOptions := &SelectOptions{} node := stack.Select(job.TaskGroups[0], selectOptions) ctx.Metrics().PopulateScoreMetaData() - must.NotNil(t, node) - must.Eq(t, one, node.Node) + if node == nil { + t.Fatalf("missing node %#v", ctx.Metrics()) + } + + if node.Node != one { + t.Fatalf("bad") + } met := ctx.Metrics() - must.One(t, met.NodesExhausted) - must.One(t, met.ClassExhausted["linux-medium-pci"]) + if met.NodesExhausted != 1 { + t.Fatalf("bad: %#v", met) + } + if met.ClassExhausted["linux-medium-pci"] != 1 { + t.Fatalf("bad: %#v", met) + } // Should have two scores, one from bin packing and one from normalization - must.Len(t, 1, met.ScoreMetaData) + if len(met.ScoreMetaData) != 1 { + t.Fatalf("bad: %#v", met) + } } diff --git a/scheduler/util.go b/scheduler/util.go index 57eaa55ed767..13151a2aa6a8 100644 --- a/scheduler/util.go +++ b/scheduler/util.go @@ -12,7 +12,6 @@ import ( log "github.com/hashicorp/go-hclog" memdb "github.com/hashicorp/go-memdb" - "github.com/hashicorp/go-set/v2" "github.com/hashicorp/nomad/helper" "github.com/hashicorp/nomad/nomad/structs" ) @@ -335,11 +334,6 @@ func tasksUpdated(jobA, jobB *structs.Job, taskGroup string) comparison { return difference("task log disabled", at.LogConfig.Disabled, bt.LogConfig.Disabled) } - // Check volume mount updates - if c := volumeMountsUpdated(at.VolumeMounts, bt.VolumeMounts); c.modified { - return c - } - // Check if restart.render_templates is updated if c := renderTemplatesUpdated(at.RestartPolicy, bt.RestartPolicy, "task restart render_templates"); c.modified { @@ -430,32 +424,6 @@ func connectServiceUpdated(servicesA, servicesB []*structs.Service) comparison { return same } -func volumeMountsUpdated(a, b []*structs.VolumeMount) comparison { - setA := set.HashSetFrom(a) - setB := set.HashSetFrom(b) - - if setA.Equal(setB) { - return same - } - - return difference("volume mounts", a, b) -} - -// volumeMountUpdated returns true if the definition of the volume mount -// has been updated in a manner that will requires the task to be recreated. -func volumeMountUpdated(mountA, mountB *structs.VolumeMount) comparison { - if mountA != nil && mountB == nil { - difference("volume mount removed", mountA, mountB) - } - - if mountA != nil && mountB != nil && - mountA.SELinuxLabel != mountB.SELinuxLabel { - return difference("volume mount selinux label", mountA.SELinuxLabel, mountB.SELinuxLabel) - } - - return same -} - // connectUpdated returns true if the connect block has been updated in a manner // that will require a destructive update. 
// diff --git a/scheduler/util_test.go b/scheduler/util_test.go index b0d17b37aa16..a7049333845b 100644 --- a/scheduler/util_test.go +++ b/scheduler/util_test.go @@ -547,42 +547,6 @@ func TestTasksUpdated(t *testing.T) { // Compare changed Template ErrMissingKey j30.TaskGroups[0].Tasks[0].Templates[0].ErrMissingKey = true must.True(t, tasksUpdated(j29, j30, name).modified) - - // Compare identical volume mounts - j31 := mock.Job() - j32 := j31.Copy() - - must.False(t, tasksUpdated(j31, j32, name).modified) - - // Modify volume mounts - j31.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ - { - Volume: "myvolume", - SELinuxLabel: "z", - }, - } - - j32.TaskGroups[0].Tasks[0].VolumeMounts = []*structs.VolumeMount{ - { - Volume: "myvolume", - SELinuxLabel: "", - }, - } - - must.True(t, tasksUpdated(j31, j32, name).modified) - - // Add volume mount - j32.TaskGroups[0].Tasks[0].VolumeMounts = append(j32.TaskGroups[0].Tasks[0].VolumeMounts, - &structs.VolumeMount{ - Volume: "myvolume2", - SELinuxLabel: "Z", - }) - - // Remove volume mount - j32 = j31.Copy() - j32.TaskGroups[0].Tasks[0].VolumeMounts = nil - - must.True(t, tasksUpdated(j31, j32, name).modified) } func TestTasksUpdated_connectServiceUpdated(t *testing.T) { diff --git a/testutil/tls.go b/testutil/tls.go index db44d4480ee0..a1093e18f340 100644 --- a/testutil/tls.go +++ b/testutil/tls.go @@ -10,20 +10,20 @@ import ( "testing" "github.com/hashicorp/nomad/helper/tlsutil" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) // Assert CA file exists and is a valid CA Returns the CA func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate { t.Helper() - must.FileExists(t, caPath) + require.FileExists(t, caPath) caData, err := os.ReadFile(caPath) - must.NoError(t, err) + require.NoError(t, err) ca, err := tlsutil.ParseCert(string(caData)) - must.NoError(t, err) - must.NotNil(t, ca) + require.NoError(t, err) + require.NotNil(t, ca) return ca } @@ -32,18 +32,21 @@ func IsValidCertificate(t *testing.T, caPath string) *x509.Certificate { func IsValidSigner(t *testing.T, keyPath string) bool { t.Helper() - must.FileExists(t, keyPath) + require.FileExists(t, keyPath) fi, err := os.Stat(keyPath) - must.NoError(t, err) + if err != nil { + t.Fatal("should not happen", err) + } if want, have := fs.FileMode(0600), fi.Mode().Perm(); want != have { t.Fatalf("private key file %s: permissions: want: %o; have: %o", keyPath, want, have) } keyData, err := os.ReadFile(keyPath) - must.NoError(t, err) + require.NoError(t, err) signer, err := tlsutil.ParseSigner(string(keyData)) - must.NoError(t, err) - must.NotNil(t, signer) + require.NoError(t, err) + require.NotNil(t, signer) + return true } diff --git a/testutil/vault.go b/testutil/vault.go index 3622a5943875..ad4c85ce3dbb 100644 --- a/testutil/vault.go +++ b/testutil/vault.go @@ -19,6 +19,7 @@ import ( "github.com/hashicorp/nomad/nomad/structs/config" vapi "github.com/hashicorp/vault/api" testing "github.com/mitchellh/go-testing-interface" + "github.com/stretchr/testify/require" ) // TestVault is a test helper. 
It uses a fork/exec model to create a test Vault @@ -228,7 +229,7 @@ func (tv *TestVault) Stop() { case <-tv.waitCh: return case <-time.After(1 * time.Second): - tv.t.Fatal("Timed out waiting for vault to terminate") + require.Fail(tv.t, "Timed out waiting for vault to terminate") } } } diff --git a/testutil/wait_test.go b/testutil/wait_test.go index 5f1785a19176..6006df36dfca 100644 --- a/testutil/wait_test.go +++ b/testutil/wait_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/shoenig/test/must" + "github.com/stretchr/testify/require" ) func TestWait_WaitForFilesUntil(t *testing.T) { @@ -30,10 +30,11 @@ func TestWait_WaitForFilesUntil(t *testing.T) { for _, file := range files { t.Logf("Creating file %s ...", file) fh, createErr := os.Create(file) - must.NoError(t, createErr) + require.NoError(t, createErr) - must.Close(t, fh) - must.FileExists(t, file) + closeErr := fh.Close() + require.NoError(t, closeErr) + require.FileExists(t, file) time.Sleep(250 * time.Millisecond) } diff --git a/ui/app/adapters/variable.js b/ui/app/adapters/variable.js index ee023f1e1d39..14de0eca9a0a 100644 --- a/ui/app/adapters/variable.js +++ b/ui/app/adapters/variable.js @@ -6,7 +6,6 @@ // @ts-check import ApplicationAdapter from './application'; import AdapterError from '@ember-data/adapter/error'; -import InvalidError from '@ember-data/adapter/error'; import { pluralize } from 'ember-inflector'; import classic from 'ember-classic-decorator'; import { ConflictError } from '@ember-data/adapter/error'; @@ -142,15 +141,6 @@ export default class VariableAdapter extends ApplicationAdapter { { detail: _normalizeConflictErrorObject(payload), status: 409 }, ]); } - if (status === 400) { - return new InvalidError([ - { - detail: - 'Invalid name. Name must contain only alphanumeric or "-", "_", "~", or "/" characters, and be fewer than 128 characters in length.', - status: 400, - }, - ]); - } return super.handleResponse(...arguments); } } diff --git a/ui/app/components/action-card.hbs b/ui/app/components/action-card.hbs index c8f0f04a4fb1..7bd8db9ad8e6 100644 --- a/ui/app/components/action-card.hbs +++ b/ui/app/components/action-card.hbs @@ -72,7 +72,6 @@ {{/if}} {{#if this.instance.messages.length}} -
           {{this.instance.messages}}
         
diff --git a/ui/app/components/client-node-row.js b/ui/app/components/client-node-row.js index fca7a9a4d986..1375136294f0 100644 --- a/ui/app/components/client-node-row.js +++ b/ui/app/components/client-node-row.js @@ -57,31 +57,37 @@ export default class ClientNodeRow extends Component.extend( @watchRelationship('allocations') watch; - @computed('node.status') + @computed('node.compositeStatus') get nodeStatusColor() { - let status = this.get('node.status'); - if (status === 'disconnected') { + let compositeStatus = this.get('node.compositeStatus'); + + if (compositeStatus === 'draining') { + return 'neutral'; + } else if (compositeStatus === 'ineligible') { return 'warning'; - } else if (status === 'down') { + } else if (compositeStatus === 'down') { return 'critical'; - } else if (status === 'ready') { + } else if (compositeStatus === 'ready') { return 'success'; - } else if (status === 'initializing') { + } else if (compositeStatus === 'initializing') { return 'neutral'; } else { return 'neutral'; } } - @computed('node.status') + @computed('node.compositeStatus') get nodeStatusIcon() { - let status = this.get('node.status'); - if (status === 'disconnected') { + let compositeStatus = this.get('node.compositeStatus'); + + if (compositeStatus === 'draining') { + return 'minus-circle'; + } else if (compositeStatus === 'ineligible') { return 'skip'; - } else if (status === 'down') { + } else if (compositeStatus === 'down') { return 'x-circle'; - } else if (status === 'ready') { + } else if (compositeStatus === 'ready') { return 'check-circle'; - } else if (status === 'initializing') { + } else if (compositeStatus === 'initializing') { return 'entry-point'; } else { return ''; diff --git a/ui/app/components/exec-terminal.js b/ui/app/components/exec-terminal.js index f493434eecae..6865212a36b8 100644 --- a/ui/app/components/exec-terminal.js +++ b/ui/app/components/exec-terminal.js @@ -3,21 +3,15 @@ * SPDX-License-Identifier: BUSL-1.1 */ -// @ts-check - import Component from '@ember/component'; import { FitAddon } from 'xterm-addon-fit'; import WindowResizable from '../mixins/window-resizable'; import { classNames } from '@ember-decorators/component'; import classic from 'ember-classic-decorator'; -import { inject as service } from '@ember/service'; -import { action } from '@ember/object'; @classic @classNames('terminal-container') export default class ExecTerminal extends Component.extend(WindowResizable) { - @service router; - didInsertElement() { super.didInsertElement(...arguments); let fitAddon = new FitAddon(); @@ -27,38 +21,6 @@ export default class ExecTerminal extends Component.extend(WindowResizable) { this.terminal.open(this.element.querySelector('.terminal')); fitAddon.fit(); - this.addExitHandler(); - } - - socketOpen = false; - hasRemovedExitHandler = false; - - @action - addExitHandler() { - window.addEventListener('beforeunload', this.confirmExit.bind(this)); - } - removeExitHandler() { - if (!this.hasRemovedExitHandler) { - window.removeEventListener('beforeunload', this.confirmExit.bind(this)); - this.hasRemovedExitHandler = true; - } - } - - /** - * - * @param {BeforeUnloadEvent} event - * @returns {string} - */ - confirmExit(event) { - if (this.socketOpen) { - event.preventDefault(); - return (event.returnValue = 'Are you sure you want to exit?'); - } - } - - willDestroy() { - super.willDestroy(...arguments); - this.removeExitHandler(); } windowResizeHandler(e) { diff --git a/ui/app/components/variable-form.hbs b/ui/app/components/variable-form.hbs index 
686c25cc6b29..2c4a98a6145b 100644 --- a/ui/app/components/variable-form.hbs +++ b/ui/app/components/variable-form.hbs @@ -50,21 +50,22 @@ {{/if}}
- - - Path - + - - + - - Key - + - + disabled={{eq this.keyValues.length 1}} + > + Delete + {{#each-in entry.warnings as |k v|}} {{v}} @@ -161,23 +166,25 @@
{{#unless this.isJSONView}} {{#unless this.isJobTemplateVariable}} - + > + Add More + {{/unless}} {{/unless}} - + > + Save + {{pluralize "Variable" @this.keyValues.length}} +
\ No newline at end of file diff --git a/ui/app/components/variable-form.js b/ui/app/components/variable-form.js index 51d00f08d221..b64614d18675 100644 --- a/ui/app/components/variable-form.js +++ b/ui/app/components/variable-form.js @@ -154,11 +154,6 @@ export default class VariableFormComponent extends Component { } } - get hasInvalidPath() { - let pathNameRegex = new RegExp('^[a-zA-Z0-9-_~/]{1,128}$'); - return !pathNameRegex.test(trimPath([this.path])); - } - @action validateKey(entry, e) { const value = e.target.value; @@ -185,7 +180,6 @@ export default class VariableFormComponent extends Component { delete entry.warnings.duplicateKeyError; entry.warnings.notifyPropertyChange('duplicateKeyError'); } - set(entry, 'key', value); } @action appendRow() { @@ -213,7 +207,6 @@ export default class VariableFormComponent extends Component { * @param {KeyboardEvent} e */ @action setModelPath(e) { - set(this, 'path', e.target.value); set(this.args.model, 'path', e.target.value); } @@ -274,27 +267,18 @@ export default class VariableFormComponent extends Component { this.removeExitHandler(); this.router.transitionTo('variables.variable', this.args.model.id); - } catch (e) { - notifyConflict(this)(e); + } catch (error) { + notifyConflict(this)(error); if (!this.hasConflict) { - let errorMessage = e; - if (e.errors && e.errors.length > 0) { - const nameInvalidError = e.errors.find((err) => err.status === 400); - if (nameInvalidError) { - errorMessage = nameInvalidError.detail; - } - } - - console.log('caught an error', e); this.notifications.add({ title: `Error saving ${this.path}`, - message: errorMessage, + message: error, color: 'critical', sticky: true, }); } else { - if (e.errors[0]?.detail) { - set(this, 'conflictingVariable', e.errors[0].detail); + if (error.errors[0]?.detail) { + set(this, 'conflictingVariable', error.errors[0].detail); } window.scrollTo(0, 0); // because the k/v list may be long, ensure the user is snapped to top to read error } diff --git a/ui/app/components/variable-form/input-group.hbs b/ui/app/components/variable-form/input-group.hbs index 66a1f4e13874..67460ad9b3cb 100644 --- a/ui/app/components/variable-form/input-group.hbs +++ b/ui/app/components/variable-form/input-group.hbs @@ -4,15 +4,26 @@ ~}} \ No newline at end of file diff --git a/ui/app/components/variable-form/namespace-filter.hbs b/ui/app/components/variable-form/namespace-filter.hbs index 15d4e2e35846..53031aec8a14 100644 --- a/ui/app/components/variable-form/namespace-filter.hbs +++ b/ui/app/components/variable-form/namespace-filter.hbs @@ -10,18 +10,19 @@ {{#if trigger.data.isSuccess}} {{#if trigger.data.result}} {{#if @data.namespaceOptions}} - - - {{#each @data.namespaceOptions as |option|}} - - {{option.label}} - - {{/each}} - + {{/if}} {{/if}} {{/if}} diff --git a/ui/app/components/variable-form/related-entities.hbs b/ui/app/components/variable-form/related-entities.hbs index 9c33a18d3498..0885641dafed 100644 --- a/ui/app/components/variable-form/related-entities.hbs +++ b/ui/app/components/variable-form/related-entities.hbs @@ -3,18 +3,18 @@ SPDX-License-Identifier: BUSL-1.1 ~}} - - Automatically-accessible variable - + diff --git a/ui/app/components/variable-paths.hbs b/ui/app/components/variable-paths.hbs index 4b00fc46f434..3016a2a88be6 100644 --- a/ui/app/components/variable-paths.hbs +++ b/ui/app/components/variable-paths.hbs @@ -3,24 +3,22 @@ SPDX-License-Identifier: BUSL-1.1 ~}} - - <:head as |H|> - - - Path - - - Namespace - - - Last Modified - - - - <:body as |B|> + + + + Path + + + 
Namespace + + + Last Modified + + + {{#each this.folders as |folder|}} - - +
- - - + + {{/each}} {{#each this.files as |file|}} - - + {{#if (can "read variable" path=file.absoluteFilePath namespace=file.variable.namespace)}} {{file.name}} {{/if}} - - + + {{file.variable.namespace}} - - + + {{moment-from-now file.variable.modifyTime}} - - - {{/each}} - - + + + {{/each}} + + + diff --git a/ui/app/controllers/clients/index.js b/ui/app/controllers/clients/index.js index ea2bc84bd8a4..b7f38375f571 100644 --- a/ui/app/controllers/clients/index.js +++ b/ui/app/controllers/clients/index.js @@ -3,8 +3,6 @@ * SPDX-License-Identifier: BUSL-1.1 */ -// @ts-check - /* eslint-disable ember/no-incorrect-calls-with-inline-anonymous-functions */ import { alias, readOnly } from '@ember/object/computed'; import { inject as service } from '@ember/service'; @@ -47,6 +45,9 @@ export default class IndexController extends Controller.extend( { qpClass: 'class', }, + { + qpState: 'state', + }, { qpDatacenter: 'dc', }, @@ -61,110 +62,6 @@ export default class IndexController extends Controller.extend( }, ]; - filterFunc = (node) => { - return node.isEligible; - }; - - clientFilterToggles = { - state: [ - { - label: 'initializing', - qp: 'state_initializing', - default: true, - filter: (node) => node.status === 'initializing', - }, - { - label: 'ready', - qp: 'state_ready', - default: true, - filter: (node) => node.status === 'ready', - }, - { - label: 'down', - qp: 'state_down', - default: true, - filter: (node) => node.status === 'down', - }, - { - label: 'disconnected', - qp: 'state_disconnected', - default: true, - filter: (node) => node.status === 'disconnected', - }, - ], - eligibility: [ - { - label: 'eligible', - qp: 'eligibility_eligible', - default: true, - filter: (node) => node.isEligible, - }, - { - label: 'ineligible', - qp: 'eligibility_ineligible', - default: true, - filter: (node) => !node.isEligible, - }, - ], - drainStatus: [ - { - label: 'draining', - qp: 'drain_status_draining', - default: true, - filter: (node) => node.isDraining, - }, - { - label: 'not draining', - qp: 'drain_status_not_draining', - default: true, - filter: (node) => !node.isDraining, - }, - ], - }; - - @computed( - 'state_initializing', - 'state_ready', - 'state_down', - 'state_disconnected', - 'eligibility_eligible', - 'eligibility_ineligible', - 'drain_status_draining', - 'drain_status_not_draining', - 'allToggles.[]' - ) - get activeToggles() { - return this.allToggles.filter((t) => this[t.qp]); - } - - get allToggles() { - return Object.values(this.clientFilterToggles).reduce( - (acc, filters) => acc.concat(filters), - [] - ); - } - - // eslint-disable-next-line ember/classic-decorator-hooks - constructor() { - super(...arguments); - this.addDynamicQueryParams(); - } - - addDynamicQueryParams() { - this.clientFilterToggles.state.forEach((filter) => { - this.queryParams.push({ [filter.qp]: filter.qp }); - this.set(filter.qp, filter.default); - }); - this.clientFilterToggles.eligibility.forEach((filter) => { - this.queryParams.push({ [filter.qp]: filter.qp }); - this.set(filter.qp, filter.default); - }); - this.clientFilterToggles.drainStatus.forEach((filter) => { - this.queryParams.push({ [filter.qp]: filter.qp }); - this.set(filter.qp, filter.default); - }); - } - currentPage = 1; @readOnly('userSettings.pageSize') pageSize; @@ -177,12 +74,14 @@ export default class IndexController extends Controller.extend( } qpClass = ''; + qpState = ''; qpDatacenter = ''; qpVersion = ''; qpVolume = ''; qpNodePool = ''; @selection('qpClass') selectionClass; + @selection('qpState') selectionState; 
@selection('qpDatacenter') selectionDatacenter; @selection('qpVersion') selectionVersion; @selection('qpVolume') selectionVolume; @@ -206,6 +105,18 @@ export default class IndexController extends Controller.extend( return classes.sort().map((dc) => ({ key: dc, label: dc })); } + @computed + get optionsState() { + return [ + { key: 'initializing', label: 'Initializing' }, + { key: 'ready', label: 'Ready' }, + { key: 'down', label: 'Down' }, + { key: 'ineligible', label: 'Ineligible' }, + { key: 'draining', label: 'Draining' }, + { key: 'disconnected', label: 'Disconnected' }, + ]; + } + @computed('nodes.[]', 'selectionDatacenter') get optionsDatacenter() { const datacenters = Array.from( @@ -284,50 +195,35 @@ export default class IndexController extends Controller.extend( } @computed( - 'clientFilterToggles', - 'drain_status_draining', - 'drain_status_not_draining', - 'eligibility_eligible', - 'eligibility_ineligible', 'nodes.[]', 'selectionClass', + 'selectionState', 'selectionDatacenter', 'selectionNodePool', 'selectionVersion', - 'selectionVolume', - 'state_disconnected', - 'state_down', - 'state_initializing', - 'state_ready' + 'selectionVolume' ) get filteredNodes() { const { selectionClass: classes, + selectionState: states, selectionDatacenter: datacenters, selectionNodePool: nodePools, selectionVersion: versions, selectionVolume: volumes, } = this; - let nodes = this.nodes; - - // new QP style filtering - for (let category in this.clientFilterToggles) { - nodes = nodes.filter((node) => { - let includeNode = false; - for (let filter of this.clientFilterToggles[category]) { - if (this[filter.qp] && filter.filter(node)) { - includeNode = true; - break; - } - } - return includeNode; - }); - } - - return nodes.filter((node) => { + const onlyIneligible = states.includes('ineligible'); + const onlyDraining = states.includes('draining'); + + // states is a composite of node status and other node states + const statuses = states.without('ineligible').without('draining'); + + return this.nodes.filter((node) => { if (classes.length && !classes.includes(node.get('nodeClass'))) return false; + if (statuses.length && !statuses.includes(node.get('status'))) + return false; if (datacenters.length && !datacenters.includes(node.get('datacenter'))) return false; if (versions.length && !versions.includes(node.get('version'))) @@ -341,6 +237,9 @@ export default class IndexController extends Controller.extend( return false; } + if (onlyIneligible && node.get('isEligible')) return false; + if (onlyDraining && !node.get('isDraining')) return false; + return true; }); } @@ -355,16 +254,6 @@ export default class IndexController extends Controller.extend( this.set(queryParam, serialize(selection)); } - @action - handleFilterChange(queryParamValue, option, queryParamLabel) { - if (queryParamValue.includes(option)) { - queryParamValue.removeObject(option); - } else { - queryParamValue.addObject(option); - } - this.set(queryParamLabel, serialize(queryParamValue)); - } - @action gotoNode(node) { this.transitionToRoute('clients.client', node); diff --git a/ui/app/controllers/evaluations/index.js b/ui/app/controllers/evaluations/index.js index d0b29291fced..a52013da7ab4 100644 --- a/ui/app/controllers/evaluations/index.js +++ b/ui/app/controllers/evaluations/index.js @@ -72,8 +72,7 @@ export default class EvaluationsController extends Controller { e instanceof MouseEvent || (e instanceof KeyboardEvent && (e.code === 'Enter' || e.code === 'Space')) || - !e || - e === 'keynav' + !e ) { 
this.statechart.send('LOAD_EVALUATION', { evaluation }); } diff --git a/ui/app/controllers/exec.js b/ui/app/controllers/exec.js index 974efef2f90f..c55e63ffe878 100644 --- a/ui/app/controllers/exec.js +++ b/ui/app/controllers/exec.js @@ -121,16 +121,10 @@ export default class ExecController extends Controller { 'Customize your command, then hit ‘return’ to run.' ); this.terminal.writeln(''); - - let namespaceCommandString = ''; - if (this.namespace && this.namespace !== 'default') { - namespaceCommandString = `-namespace ${this.namespace} `; - } - this.terminal.write( - `$ nomad alloc exec -i -t ${namespaceCommandString}-task ${escapeTaskName( - taskName - )} ${this.taskState.allocation.shortId} ` + `$ nomad alloc exec -i -t -task ${escapeTaskName(taskName)} ${ + this.taskState.allocation.shortId + } ` ); this.terminal.write(ANSI_WHITE); diff --git a/ui/app/controllers/jobs/run/templates/new.js b/ui/app/controllers/jobs/run/templates/new.js index 59bce48f629e..65ad30d3ff7a 100644 --- a/ui/app/controllers/jobs/run/templates/new.js +++ b/ui/app/controllers/jobs/run/templates/new.js @@ -36,11 +36,6 @@ export default class JobsRunTemplatesNewController extends Controller { ); } - get hasInvalidName() { - let pathNameRegex = new RegExp('^[a-zA-Z0-9-_~/]{1,128}$'); - return !pathNameRegex.test(this.templateName); - } - @action updateKeyValue(key, value) { if (this.model.keyValues.find((kv) => kv.key === key)) { @@ -79,19 +74,9 @@ export default class JobsRunTemplatesNewController extends Controller { this.router.transitionTo('jobs.run.templates'); } catch (e) { - let errorMessage = - 'An unexpected error occurred when saving your Job template.'; - console.log('caught', e); - if (e.errors && e.errors.length > 0) { - const nameInvalidError = e.errors.find((err) => err.status === 400); - if (nameInvalidError) { - errorMessage = nameInvalidError.detail; - } - } - this.notifications.add({ title: 'Job template cannot be saved.', - message: errorMessage, + message: e, color: 'critical', }); } diff --git a/ui/app/controllers/variables/variable/index.js b/ui/app/controllers/variables/variable/index.js index b2b6090927d7..b9a89f38297f 100644 --- a/ui/app/controllers/variables/variable/index.js +++ b/ui/app/controllers/variables/variable/index.js @@ -37,10 +37,6 @@ export default class VariablesVariableIndexController extends Controller { this.isDeleting = false; } - @action copyVariable() { - navigator.clipboard.writeText(JSON.stringify(this.model.items, null, 2)); - } - @task(function* () { try { yield this.model.deleteRecord(); diff --git a/ui/app/styles/components/actions.scss b/ui/app/styles/components/actions.scss index 860290b5b93c..bf61e4161c5e 100644 --- a/ui/app/styles/components/actions.scss +++ b/ui/app/styles/components/actions.scss @@ -36,9 +36,6 @@ padding-left: 0; padding-right: 0; border-width: 0; - span { - text-align: left; - } } } } @@ -128,7 +125,6 @@ height: 200px; border-radius: 6px; resize: vertical; - position: relative; pre { background-color: transparent; color: unset; @@ -142,15 +138,6 @@ margin-top: -1px; visibility: hidden; } - .copy-button { - position: sticky; - top: 0.5rem; - margin-right: 0.5rem; - margin-left: auto; - width: max-content; - height: 32px; - margin-bottom: -32px; - } } } diff --git a/ui/app/styles/components/variables.scss b/ui/app/styles/components/variables.scss index 2e341987128b..b0c5c9810e51 100644 --- a/ui/app/styles/components/variables.scss +++ b/ui/app/styles/components/variables.scss @@ -5,18 +5,28 @@ .section.single-variable { margin-top: 1.5rem; 
-} -$hdsLabelTopOffset: 26px; -$hdsInputHeight: 35px; + .back-link { + text-decoration: none; + color: #363636; + position: relative; + top: 4px; + } +} .variable-title { - margin-bottom: 2rem; - .hds-page-header__main { - flex-direction: unset; + .toggle { + font-size: 0.8rem; + margin-left: 1rem; + position: relative; + top: -0.25rem; + .toggler { + margin-right: 0.25rem; + } } - .copy-variable span { - color: var(--token-color-foreground-primary); + .copy-button { + position: relative; + top: 3px; } } @@ -25,6 +35,18 @@ $hdsInputHeight: 35px; margin-bottom: 1rem; } + .path-input { + height: 2.25em; + + &:disabled { + background-color: #f5f5f5; + } + &.error { + color: $red; + border-color: $red; + } + } + .duplicate-path-error { position: relative; animation: slide-in 0.3s ease-out; @@ -34,21 +56,13 @@ $hdsInputHeight: 35px; display: grid; grid-template-columns: 6fr 1fr; gap: 0 1rem; - align-items: start; - .namespace-dropdown { - white-space: nowrap; - width: auto; - position: relative; - top: $hdsLabelTopOffset; - height: $hdsInputHeight; - } } .key-value { display: grid; grid-template-columns: 1fr 4fr 130px; gap: 0 1rem; - align-items: start; + align-items: end; input.error { color: $red; @@ -63,12 +77,6 @@ $hdsInputHeight: 35px; } } - .delete-entry-button { - position: relative; - top: $hdsLabelTopOffset; - height: $hdsInputHeight; - } - button.show-hide-values { height: 100%; box-shadow: none; @@ -123,6 +131,11 @@ $hdsInputHeight: 35px; grid-auto-columns: max-content; grid-auto-flow: column; gap: 1rem; + + .button.is-info.is-inverted.add-more[disabled] { + border-color: #dbdbdb; + box-shadow: 0 2px 0 0 rgb(122 122 122 / 20%); + } } } @@ -139,8 +152,20 @@ table.path-tree { } } -.related-entities { - margin-bottom: 2rem; +.section .notification.related-entities { + --blue: #1563ff; + display: flex; + align-items: center; + gap: 0.5rem; + &.notification { + align-items: center; + } + a { + color: $blue; + display: inline-flex; + align-items: center; + gap: 0.25rem; + } } .related-entities-hint { @@ -153,6 +178,25 @@ table.path-tree { } } +.job-template-hint { + margin-top: 0.5rem; + code { + background-color: #eee; + padding: 0.25rem; + } + .copy-button { + display: inline-block; + padding-left: 0; + position: relative; + top: -5px; + button, + .button { + background-color: transparent; + padding-right: 0.25rem; + } + } +} + table.variable-items { // table-layout: fixed; td.value-cell { diff --git a/ui/app/styles/core/table.scss b/ui/app/styles/core/table.scss index b98f82cc3cf2..7baede0e05d3 100644 --- a/ui/app/styles/core/table.scss +++ b/ui/app/styles/core/table.scss @@ -110,12 +110,6 @@ white-space: nowrap; } - &.node-status-badges { - .hds-badge__text { - white-space: nowrap; - } - } - &.is-narrow { padding: 1.25em 0 1.25em 0.5em; diff --git a/ui/app/templates/allocations/allocation/task/index.hbs b/ui/app/templates/allocations/allocation/task/index.hbs index abeade2fee13..5b4221061617 100644 --- a/ui/app/templates/allocations/allocation/task/index.hbs +++ b/ui/app/templates/allocations/allocation/task/index.hbs @@ -223,7 +223,7 @@
diff --git a/ui/app/templates/clients/index.hbs b/ui/app/templates/clients/index.hbs index cb30abbd916e..f8f0f9ae2645 100644 --- a/ui/app/templates/clients/index.hbs +++ b/ui/app/templates/clients/index.hbs @@ -18,190 +18,52 @@ /> {{/if}}
- - - - +
+ - - {{#each this.clientFilterToggles.state as |option|}} - - {{capitalize option.label}} - - {{/each}} - - - {{#each this.clientFilterToggles.eligibility as |option|}} - - {{capitalize option.label}} - - {{/each}} - - - {{#each this.clientFilterToggles.drainStatus as |option|}} - - {{capitalize option.label}} - - {{/each}} - - - - - {{#each this.optionsNodePool key="label" as |option|}} - - {{option.label}} - - {{else}} - - No Node Pool filters - - {{/each}} - - - - - {{#each this.optionsClass key="label" as |option|}} - - {{option.label}} - - {{else}} - - No Class filters - - {{/each}} - - - - - {{#each this.optionsDatacenter key="label" as |option|}} - - {{option.label}} - - {{else}} - - No Datacenter filters - - {{/each}} - - - - - - {{#each this.optionsVersion key="label" as |option|}} - - {{option.label}} - - {{else}} - - No Version filters - - {{/each}} - - - - - {{#each this.optionsVolume key="label" as |option|}} - - {{option.label}} - - {{else}} - - No Volume filters - - {{/each}} - - +
+
{{#if this.sortedNodes}} Name - State + State Address Node Pool Datacenter
diff --git a/ui/app/templates/components/client-node-row.hbs b/ui/app/templates/components/client-node-row.hbs
index 09e2142fd2b6..66780e7bc335 100644
--- a/ui/app/templates/components/client-node-row.hbs
+++ b/ui/app/templates/components/client-node-row.hbs
@@ -12,41 +12,15 @@
 {{this.node.shortId}} {{this.node.name}} - - - - {{#if this.node.isEligible}} - - {{else}} - - {{/if}} - - {{#if this.node.isDraining}} - - {{else}} + + - {{/if}} + {{this.node.httpAddr}}
diff --git a/ui/app/templates/evaluations/index.hbs b/ui/app/templates/evaluations/index.hbs
index 6b81b4cd0ebe..d850a880dbf7 100644
--- a/ui/app/templates/evaluations/index.hbs
+++ b/ui/app/templates/evaluations/index.hbs
@@ -86,7 +86,7 @@
 {{row.model.shortId}}
diff --git a/ui/app/templates/exec.hbs b/ui/app/templates/exec.hbs
index 31ad2e7fb0a5..8a1e503bf302 100644
--- a/ui/app/templates/exec.hbs
+++ b/ui/app/templates/exec.hbs
@@ -58,6 +58,6 @@
 {{/each}} - + {{/if}}
\ No newline at end of file
diff --git a/ui/app/templates/jobs/run/templates/new.hbs b/ui/app/templates/jobs/run/templates/new.hbs
index 1ad146067f9c..1a59f90e7194 100644
--- a/ui/app/templates/jobs/run/templates/new.hbs
+++ b/ui/app/templates/jobs/run/templates/new.hbs
@@ -28,11 +28,6 @@
 There is already a template named {{this.templateName}}.

{{/if}} - {{#if this.hasInvalidName}} -

- Template name must contain only alphanumeric or "-", "_", "~", or "/" characters, and be fewer than 128 characters in length. -

- {{/if}} {{#if this.system.shouldShowNamespaces}}